prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90)
---|---|---|
import pandas as pd
import os
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from thundersvm import SVR
from sklearn.svm import LinearSVR
import time
import util
def models(dataset_canarin_seconds):
# - Method 2: predict the calibrated decibels from the microphone readings and the environmental data of the Canarin sensor, using a one-sample-per-second frequency.
# - As in the previous case, we use data sampled once per second (the same rate as the microphone and the sound level meter).
#   As before, given the amount of data, we use one month of sampling.
# - Split the dataset
X_train, X_val, y_train, y_val = train_test_split(
dataset_canarin_seconds.drop("db_phon", axis=1), # X = everything except db_phon
dataset_canarin_seconds["db_phon"], # y = db_phon
test_size=1/3, random_state=42 # split parameters
)
# - Linear regression (multivariate regression).
print("Linear regression")
lrm = LinearRegression()
lrm.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, lrm)
# - Print the coefficients the model assigns to each feature, to understand which ones are most important.
print(pd.Series(lrm.coef_, index=X_train.columns))
# - Apply data standardization before the linear regression.
print("Linear regression standardized")
model = Pipeline([
("scale", StandardScaler()),
("linreg", LinearRegression())
])
model.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, model)
print(pd.Series(model.named_steps["linreg"].coef_, index=X_train.columns))
# - Polynomial regression
print("Polynomial regression")
model = Pipeline([
("poly", PolynomialFeatures(degree=2, include_bias=False)),
("scale", StandardScaler()),
("linreg", LinearRegression())
])
model.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, model)
# - Regularization with __Ridge__
print("Polynomial regression with Ridge")
model = Pipeline([
("scale", StandardScaler()),
("poly", PolynomialFeatures(degree=2, include_bias=False)),
("regr", Ridge(alpha=0.5))
])
model.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, model)
# - Regularization with __Elastic Net__ (a hybrid of Ridge and Lasso).
print("Polynomial regression with ElasticNet")
model = Pipeline([
("poly", PolynomialFeatures(degree=2, include_bias=False)),
("scale", StandardScaler()),
("linreg", ElasticNet(alpha=0.5, l1_ratio=0.2))
])
model.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, model)
# - Now that we have tested some of the main models and seen which of them reduce the error,
#   let's tune the hyperparameters using __Grid Search__.
print("Polynomial regression with GRID search")
model = Pipeline([
("poly", PolynomialFeatures(include_bias=False)),
("scale", StandardScaler()),
("linreg", LinearRegression())
])
grid = {
"poly__degree": range(1, 11),
}
gs = GridSearchCV(model, param_grid=grid)
gs.fit(X_train, y_train)
print( | pd.DataFrame(gs.cv_results_) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
( | TS('2015-01-05') | pandas.Timestamp |
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns; sns.set()
from scipy.stats import ranksums, levene
import csv
import numpy as np
from matplotlib.ticker import FuncFormatter
"""
Load both song and sunrise data and organize/subset by before sunrise and month
"""
# load in song data
data_path = "C:/Users/username/Box " \
"Sync/FolderName/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed_forTOD.csv"
log_song_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
col_to_skip = ['Latitude', 'Longitude', 'RecordingDay',
'RecordingYear', 'RecordingTime',
'RecordingTimeSeconds']
data_subset = log_song_data.drop(col_to_skip, axis=1)
# load in time data --> before or after sunrise, twilights, and noon (only going to use sunrise and noon)
data_path = "C:/Users/username/Box " \
"Sync/FolderName/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed" \
"_forTOD_SunriseTwilightNoon.csv"
time_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
# must remove duplicates -- have more than one bird from same recording -- duplicate catalog number and time data
time_data = time_data.drop_duplicates()
# combine tables using catalog no
combined_df = pd.merge(data_subset, time_data, on='CatalogNo')
print(combined_df.shape) # 314
print(combined_df.RecordingMonth.unique())
# remove month 8 since it only has one recording
combined_df = combined_df[combined_df['RecordingMonth'] != 8.0]
song_variables = ['Duration of Song Bout (log(ms))', 'Total Number of '
'Syllables (log(number))']
print(combined_df.groupby('RecordingMonth').count())
combined_df.CatalogNo.to_csv("C:/Users/username\Box "
"Sync\FolderName\ChippiesTimeOfDay\RecordingInfo"
"/TimeOfDayPaper_RecordingsUsed.csv", index=False)
"""
Variance in each month
"""
months = sorted(combined_df.RecordingMonth.unique())
with open("C:/Users/username\Box "
"Sync\FolderName\ChippiesTimeOfDay\MonthDiscrete"
"/Variance_ofMonths.csv",
'wb') as file:
filewriter = csv.writer(file, delimiter=',')
filewriter.writerow(['Song Variable',
'March',
'April',
'May',
'June',
'July'
])
for sv in song_variables:
variance = []
for m in months:
vector = combined_df.loc[combined_df['RecordingMonth'] == m, sv]
variance.append(np.var(vector))
filewriter.writerow([sv] + variance)
"""
Brown-Forsythe test (Levene's with median)
"""
# between all months
values_per_group = [col for col_name, col in combined_df.groupby(
'RecordingMonth')['Duration of Song Bout (log(ms))']]
print(levene(*values_per_group, center='median'))
months = sorted(combined_df.RecordingMonth.unique())
rs = np.zeros((5, 5))
for sv in song_variables:
for i in range(len(months)-1):
for j in range(i+1, len(months)):
m1 = combined_df.loc[combined_df['RecordingMonth'] == months[i], sv]
m2 = combined_df.loc[combined_df['RecordingMonth'] == months[j], sv]
rs[j, i] = levene(m1, m2, center='median')[1]
results = pd.DataFrame(data=rs, index=months)
results.to_csv("C:/Users/username\Box "
"Sync\FolderName\ChippiesTimeOfDay\MonthDiscrete"
"/BrownForsythe_" + sv + ".csv", header=months)
"""
Wilcoxon Ranksums
"""
months = sorted(combined_df.RecordingMonth.unique())
rs = np.zeros((5, 5))
for sv in song_variables:
for i in range(len(months)-1):
for j in range(i+1, len(months)):
m1 = combined_df.loc[combined_df['RecordingMonth'] == months[i], sv]
m2 = combined_df.loc[combined_df['RecordingMonth'] == months[j], sv]
rs[j, i] = ranksums(m1, m2)[1]
results = | pd.DataFrame(data=rs, index=months) | pandas.DataFrame |
# Runs indefinitely in a thread to provide data for the second screen
import sys
import pandas as pd
from polygon import RESTClient
import threading
import json
import requests
def get_todays_percent_change_of_symbol(symbol):
"""
Fetch today's change in percent for the given symbol name.
:param symbol: ticker name to be queried
:return: todaysChangePercent
"""
p_auth = "<KEY>"
query = """https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers/""" + symbol + """?&apiKey=""" + p_auth
print(query)
response = requests.get(query)
json_data = json.loads(response.text)
print(json_data)
try:
change = json_data["ticker"]["todaysChangePerc"]
return change
except:
return None
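# Hypothetical usage sketch (not part of the original script): the ticker below is an
# assumption, and a valid Polygon API key must replace "<KEY>" above for this to work.
def _example_single_symbol_change():
    change = get_todays_percent_change_of_symbol("AAPL")
    if change is not None:
        print(f"AAPL moved {change:.2f}% today")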
def get_todays_percent_change_of_symbols(symbol_array, filter_growth=0):
"""
Returns a dict of today's change percent for every symbol in symbol_array that has that data,
i.e., it won't return data for every symbol name because some symbols don't have that data or are inactive.
:param symbol_array: list of ticker symbols to query
:return: dict keyed by symbol containing the percentage value
"""
p_auth = "<KEY>"
final_data = {}
required_iteration = int(len(symbol_array) / 1000)
left_data_will_be = len(symbol_array) - (required_iteration * 1000)
if required_iteration == 0:
query = """https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers?tickers="""
values = ",".join(symbol_array) + """&apiKey=""" + p_auth
query = query + values
response = requests.get(query)
json_data = json.loads(response.text)
for ticker in json_data["tickers"]:
if int(ticker["todaysChangePerc"]) > filter_growth:
final_data[ticker["ticker"]] = ticker["todaysChangePerc"]
else:
prev_thousand = 0
for thousand in range(1, required_iteration + 1):
print(f"THOUSAND : {thousand}")
query = """https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers?tickers="""
values = ",".join(symbol_array[prev_thousand:thousand * 1000]) + """&apiKey=""" + p_auth
query = query + values
response = requests.get(query)
json_data = json.loads(response.text)
prev_thousand = thousand * 1000
for ticker in json_data["tickers"]:
if int(ticker["todaysChangePerc"]) > filter_growth:
final_data[ticker["ticker"]] = ticker["todaysChangePerc"]
if left_data_will_be > 0:
print("Getting rest of the data")
query = """https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers?tickers="""
values = ",".join(symbol_array[prev_thousand:len(symbol_array)]) + """&apiKey=""" + p_auth
query = query + values
response = requests.get(query)
json_data = json.loads(response.text)
for ticker in json_data["tickers"]:
if int(ticker["todaysChangePerc"]) > filter_growth:
final_data[ticker["ticker"]] = ticker["todaysChangePerc"]
return final_data
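# Hypothetical usage sketch (not part of the original script): the watchlist and the 5%
# growth filter are assumptions; this also needs a valid Polygon API key in place of "<KEY>".
def _example_screen_watchlist():
    watchlist = ["AAPL", "MSFT", "TSLA"]
    gainers = get_todays_percent_change_of_symbols(watchlist, filter_growth=5)
    for symbol, pct in gainers.items():
        print(f"{symbol}: up {pct:.2f}% today")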
def calculate_top_movers():
df1 = pd.read_csv("app_data/company_tickers_percent.csv")
avg_ = []
ticker = []
percent = []
for i in range(0, len(df1["ticker"])):
tick = df1["ticker"][i]
change = df1["change_percent"][i]
try:
dx = pd.read_csv(f"app_data/market/barset/market_barset_data{tick}.csv")
avg = sum(dx['volume']) / len(dx['volume'])
avg_.append(avg)
ticker.append(tick)
percent.append(change)
except:
pass
df2 = | pd.DataFrame() | pandas.DataFrame |
import unittest
import canopy
import pandas as pd
import numpy as np
temperatureK = 373.15
temperatureC = 100.0
temperatureF = 212.0
temperatureK2 = 473.15
temperatureC2 = 200.0
temperatureF2 = 392.0
class UnitsTest(unittest.TestCase):
def setUp(self):
self.units = canopy.Units()
# Specific units.
def test_specific_units(self):
self.assertAlmostEqual(
self.units.convert_value_between_units(1, 'inHg', 'Pa'),
3386.39, delta=0.01);
self.assertAlmostEqual(
self.units.convert_value_between_units(1000, 'Pa', 'inHg'),
0.2953, delta=0.0001);
def test_try_get_conversion_to_si(self):
existing = self.units.try_get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.try_get_conversion_to_si('blah')
self.assertEqual(missing, None)
def test_get_conversion_to_si(self):
existing = self.units.get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
with self.assertRaises(KeyError):
self.units.get_conversion_to_si('blah')
def test_get_conversion_to_si_or_default(self):
existing = self.units.get_conversion_to_si_or_default('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.get_conversion_to_si_or_default('blah')
self.assertEqual(missing.factor, 1)
self.assertEqual(missing.offset, 0)
# FROM SI
def test_convert_value_from_si(self):
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C'), temperatureC, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F'), temperatureF, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C', True), temperatureK, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F', True), temperatureK * 9 / 5, delta=0.01)
self.assertEqual(self.units.convert_value_from_si(temperatureK, 'K'), temperatureK)
def test_convert_array_from_si(self):
data = np.array([temperatureK, temperatureK2])
data_copy = np.copy(data)
result = self.units.convert_array_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_array_from_si_no_conversion_required(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_array_from_si_always_return_copy(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_from_si_inplace(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si(self):
data = pd.Series([temperatureK, temperatureK2])
data_copy = data.copy()
result = self.units.convert_series_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si_no_conversion_required(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_series_from_si_always_return_copy(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(result.equals(data))
def test_convert_series_from_si_inplace(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
# TO SI
def test_convert_value_to_si(self):
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureC, 'C'), temperatureK, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureF, 'F'), temperatureK, delta=0.01)
self.assertEqual(self.units.convert_value_to_si(3, 'e-3'), 0.003)
self.assertEqual(self.units.convert_value_to_si(3, 'e-6'), 0.000003)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureC, 'C', True), temperatureC, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_to_si(temperatureF, 'F', True), temperatureF * 5 / 9, delta=0.01)
self.assertEqual(self.units.convert_value_to_si(temperatureK, 'K'), temperatureK)
def test_convert_array_to_si(self):
data = np.array([temperatureF, temperatureF2])
data_copy = np.copy(data)
result = self.units.convert_array_to_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_array_to_si_no_conversion_required(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_to_si(data, 'K')
self.assertIs(result, data)
def test_convert_array_to_si_always_return_copy(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_to_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_to_si_inplace(self):
data = np.array([temperatureF, temperatureF2])
result = self.units.convert_array_to_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_series_to_si(self):
data = pd.Series([temperatureF, temperatureF2])
data_copy = data.copy()
result = self.units.convert_series_to_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
def test_convert_series_to_si_no_conversion_required(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_to_si(data, 'K')
self.assertIs(result, data)
def test_convert_series_to_si_always_return_copy(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_to_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(result.equals(data))
def test_convert_series_to_si_inplace(self):
data = pd.Series([temperatureF, temperatureF2])
result = self.units.convert_series_to_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureK, delta=0.01)
self.assertAlmostEqual(result[1], temperatureK2, delta=0.01)
# BETWEEN UNITS
def test_convert_value_between_units(self):
self.assertAlmostEqual(self.units.convert_value_between_units(temperatureC, 'C', 'F'), temperatureF, delta=0.01)
def test_convert_array_between_units(self):
data = np.array([temperatureC, temperatureC2])
data_copy = np.copy(data)
result = self.units.convert_array_between_units(data, 'C', 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
# It should convert between non-SI units when only a scale factor (no offset) is involved
data = np.array([1000.0, 2000.0])
result = self.units.convert_array_between_units(data, 'mm', 'km')
self.assertEqual(len(result), 2)
self.assertEqual(result[0], 0.001)
self.assertEqual(result[1], 0.002)
def test_convert_array_between_units_no_conversion_required(self):
data = np.array([temperatureC, temperatureC2])
result = self.units.convert_array_between_units(data, 'C', 'C')
self.assertIs(result, data)
def test_convert_array_between_units_always_return_copy(self):
data = np.array([temperatureC, temperatureC2])
result = self.units.convert_array_between_units(data, 'C', 'C', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_between_units_inplace(self):
data = np.array([temperatureC, temperatureC2])
result = self.units.convert_array_between_units(data, 'C', 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_between_units(self):
data = pd.Series([temperatureC, temperatureC2])
data_copy = data.copy()
result = self.units.convert_series_between_units(data, 'C', 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
# It should convert between non-SI units when only a scale factor (no offset) is involved
data = pd.Series([1000.0, 2000.0])
result = self.units.convert_series_between_units(data, 'mm', 'km')
self.assertEqual(len(result), 2)
self.assertEqual(result[0], 0.001)
self.assertEqual(result[1], 0.002)
def test_convert_series_between_units_no_conversion_required(self):
data = pd.Series([temperatureC, temperatureC2])
result = self.units.convert_series_between_units(data, 'C', 'C')
self.assertIs(result, data)
def test_convert_series_between_units_always_return_copy(self):
data = pd.Series([temperatureC, temperatureC2])
result = self.units.convert_series_between_units(data, 'C', 'C', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(result.equals(data))
def test_convert_series_between_units_inplace(self):
data = | pd.Series([temperatureC, temperatureC2]) | pandas.Series |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
return lambda: Hash_table(dims, box_size)
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11,-10:11]
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pd.concat([pts0, pts1], ignore_index=True)
allpts.x += 100 # Because BTree doesn't allow negative coordinates
allpts.y += 100
return allpts
class CommonTrackingTests(object):
do_diagnostics = False # Don't ask for diagnostic info from linker
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_search_range' in self.diag.columns
# Except for first frame, all particles should have been labeled
# with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 0].isnull())
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_two_isolated_steppers_one_gapped(self):
N = 5
Y = 25
# Begin second feature one frame later than the first,
# so the particle labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
a = a.drop(3).reset_index(drop=True)
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy()
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# link_df_iter() tests not performed, because hash_size is
# not knowable from the first frame alone.
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_isolated_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N), 'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
assert_frame_equal(actual_iter, expected)
# Many 2D random walks
np.random.seed(0)
initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
import itertools
c = itertools.count()
def walk(x, y):
i = next(c)
return DataFrame({'x': x + random_walk(N - i),
'y': y + random_walk(N - i),
'frame': np.arange(i, N)})
f = pd.concat([walk(*pos) for pos in initial_positions])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
assert_frame_equal(actual_iter, expected)
def test_start_at_frame_other_than_zero(self):
# One 1D stepper
N = 5
FIRST_FRAME = 3
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': FIRST_FRAME + np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(6, 2))
assert_frame_equal(actual, expected)
def test_blank_frame_no_memory(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': [0, 1, 2, 4, 5]})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(10, 10))
assert_frame_equal(actual, expected)
# This doesn't error, but we might wish it would
# give the particle a new ID after the gap. It just
# ignores the missing frame.
def test_real_data_that_causes_duplicate_bug(self):
filename = 'reproduce_duplicate_track_assignment.df'
f = pd.read_pickle(os.path.join(path, filename))
# Not all parameters reproduce it, but these do
self.link_df(f, 8, 2, verify_integrity=True)
def test_search_range(self):
t = self.link(unit_steps(), 1.1, hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(unit_steps(), 0.9, hash_generator((10, 10), 1))
assert len(t_short) == len(unit_steps()) # Each step is a separate track.
t = self.link(random_walk_legacy(), max_disp + 0.1,
hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(random_walk_legacy(), max_disp - 0.1,
hash_generator((10, 10), 1))
assert len(t_short) > 1 # Multiple tracks
def test_box_size(self):
"""No matter what the box size, there should be one track, and it should
contain all the points."""
for box_size in [0.1, 1, 10]:
t1 = self.link(unit_steps(), 1.1, hash_generator((10, 10), box_size))
t2 = self.link(random_walk_legacy(), max_disp + 1,
hash_generator((10, 10), box_size))
assert len(t1) == 1
assert len(t2) == 1
assert len(t1[0].points) == len(unit_steps())
assert len(t2[0].points) == len(random_walk_legacy())
def test_easy_tracking(self):
level_count = 5
p_count = 16
levels = []
for j in range(level_count):
level = []
for k in np.arange(p_count) * 2:
level.append(PointND(j, (j, k)))
levels.append(level)
hash_generator = lambda: Hash_table((level_count + 1,
p_count * 2 + 1), .5)
tracks = self.link(levels, 1.5, hash_generator)
assert len(tracks) == p_count
for t in tracks:
x, y = zip(*[p.pos for p in t])
dx = np.diff(x)
dy = np.diff(y)
assert np.sum(dx) == level_count - 1
assert np.sum(dy) == 0
def test_copy(self):
"""Check inplace/copy behavior of link_df, link_df_iter"""
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
f_inplace = f.copy()
expected = f.copy()
expected['particle'] = np.zeros(N)
# Should add particle column in-place
# UNLESS diagnostics are enabled
actual = self.link_df(f_inplace, 5)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'particle' not in f_inplace.columns
else:
assert_frame_equal(actual, f_inplace)
# Should copy
actual = self.link_df(f, 5, copy_features=True)
assert_frame_equal(actual, expected)
assert 'particle' not in f.columns
# Should copy
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
assert 'particle' not in f.columns
@nose.tools.raises(tp.SubnetOversizeException)
def test_oversize_fail(self):
self.link_df(contracting_grid(), 1)
@nose.tools.raises(tp.SubnetOversizeException)
def test_adaptive_fail(self):
"""Check recursion limit"""
self.link_df(contracting_grid(), 1, adaptive_stop=0.92)
def link(self, *args, **kwargs):
kwargs.update(self.linker_opts)
return tp.link(*args, **kwargs)
def link_df(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
return tp.link_df(*args, **kwargs)
def link_df_iter(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
args = list(args)
features = args.pop(0)
res = pd.concat(tp.link_df_iter(
(df for fr, df in features.groupby('frame')), *args, **kwargs))
return res.sort(['particle', 'frame']).reset_index(drop=True)
class TestOnce(unittest.TestCase):
# simple API tests that need only run on one engine
def setUp(self):
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
self.features = f
def test_t_column(self):
f = self.features.copy()
cols = list(f.columns)
name = 'arbitrary name'
cols[cols.index('frame')] = name
f.columns = cols
# smoke tests
tp.link_df(f, 5, t_column=name, verify_integrity=True)
f_iter = (frame for fnum, frame in f.groupby('arbitrary name'))
list(tp.link_df_iter(f_iter, 5, t_column=name, verify_integrity=True))
@nose.tools.raises(ValueError)
def test_check_iter(self):
"""Check that link_df_iter() makes a useful error message if we
try to pass a single DataFrame."""
list(tp.link_df_iter(self.features.copy(), 5))
class SubnetNeededTests(CommonTrackingTests):
"""Tests that assume a best-effort subnet linker (i.e. not "drop")."""
def test_two_nearby_steppers(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
| assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
import gc
import pickle
import pandas as pd
from gtfspy.networks import walk_transfer_stop_to_stop_network
from gtfspy.routing.connection import Connection
from gtfspy.routing.journey_path_analyzer import NodeJourneyPathAnalyzer
from gtfspy.routing.multi_objective_pseudo_connection_scan_profiler import MultiObjectivePseudoCSAProfiler
from gtfspy.util import makedirs
from gtfspy.util import timeit
from research.route_diversity.diversity_settings import *
from research.route_diversity.rd_utils import get_custom_spatial_bounds
"""
Loops through a set of target nodes, runs routing for them, and stores the results in a database
Pipeline
pyfile1
1. Create database, based on the parameters set in settings
2. Divide origin nodes into n parts
3. Run all_to_all.py
4a. pickle
or
4b. direct to db
5. Create indices once everything is finished
all_to_all.py
srun --mem=1G --time=0:10:00 python3 research/westmetro_paper/scripts/all_to_all.py run_preparations
srun --mem=6G --time=2:00:00 python3 research/westmetro_paper/scripts/all_to_all.py to_db
"""
class JourneyAnalyzePipeline:
def __init__(self, gtfs, day_start, routing_start_time, routing_end_time, analysis_start_time,
analysis_end_time, routing_label_pickle_dir, diversity_pickle_dir, feed, city_coords, **kwargs):
self.gtfs = gtfs
self.tz = self.gtfs.get_timezone_name()
self.day_start = day_start
self.routing_start_time = routing_start_time
self.routing_end_time = routing_end_time
self.analysis_start_time = analysis_start_time
self.analysis_end_time = analysis_end_time
self.routing_label_pickle_dir = routing_label_pickle_dir
self.diversity_pickle_dir = diversity_pickle_dir
self.feed = feed
self.city_coords = city_coords
self.bbox = get_custom_spatial_bounds(**city_coords)
self.performance_measure_dict = {}
self.performance_measure_df = | pd.DataFrame() | pandas.DataFrame |
import copy
import csv
import io
import os
from pathlib import Path
import socket
import tempfile
import threading
import unittest
import pandas as pd
import pyarrow as pa
from pyarrow import csv as arrow_csv
from cleanup import cleanup_on_shutdown, directories_to_delete
import main
from proto.aiengine.v1 import aiengine_pb2
from tests import common
import train
class TrainingLoopTests(unittest.TestCase):
ALGORITHM = os.environ.get("ALGORITHM")
IPC_PATH = Path("/", "tmp", "spice_ai_test_loop.sock")
def setUp(self):
# Preventing tensorflow verbose initialization
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if self.ALGORITHM is None:
self.ALGORITHM = "dql" # pylint: disable=invalid-name
self.aiengine = main.AIEngine()
self.trader_init_req = common.get_init_from_json(
init_data_path="../../test/assets/aiengine/api/trader_init.json",
pod_name="trader",
)
with open("../../test/assets/data/csv/trader.csv", "r", encoding="utf8") as trader_data:
self.trader_data_csv = trader_data.read()
self.episode_results = []
self.original_post_episode_result = train.post_episode_result
train.post_episode_result = (
lambda request_url, episode_data: self.episode_results.append(
{"request_url": request_url, "episode_data": episode_data}
)
)
self.original_end_of_episode = train.end_of_episode
self.temp_dir = tempfile.mkdtemp(prefix='spice_test_')
directories_to_delete.append(self.temp_dir)
def tearDown(self):
train.post_episode_result = self.original_post_episode_result
train.end_of_episode = self.original_end_of_episode
cleanup_on_shutdown()
def init(
self,
init_req: aiengine_pb2.InitRequest,
expected_error: bool = False,
expected_result: str = "ok",
):
resp = self.aiengine.Init(init_req, None)
self.assertEqual(resp.error, expected_error)
self.assertEqual(resp.result, expected_result)
def add_data(self, pod_name: str, csv_data: str, should_error=False):
table = arrow_csv.read_csv(io.BytesIO(csv_data.encode()))
ready_barrier = threading.Barrier(2, timeout=2)
ipc_thread = threading.Thread(target=self.ipc_server, args=(self.IPC_PATH, table, ready_barrier,))
ipc_thread.start()
ready_barrier.wait()
resp = self.aiengine.AddData(
aiengine_pb2.AddDataRequest(pod=pod_name, unix_socket=str(self.IPC_PATH)), None
)
ipc_thread.join()
if not should_error:
self.assertFalse(resp.error)
return resp
@staticmethod
def ipc_server(ipc_path: Path, table: pa.Table, ready_barrier: threading.Barrier):
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as arrow_socket:
if ipc_path.exists():
ipc_path.unlink()
try:
arrow_socket.settimeout(2)
arrow_socket.bind(str(ipc_path).encode())
arrow_socket.listen(1)
ready_barrier.wait()
connection, _address = arrow_socket.accept()
with connection:
writer = pa.ipc.RecordBatchStreamWriter(connection.makefile(mode="wb"), table.schema)
writer.write_table(table)
except OSError as error:
print(error)
arrow_socket.shutdown(socket.SHUT_RDWR)
ipc_path.unlink()
def start_training(
self, pod_name: str, flight: str = None, number_episodes: int = None, epoch_time: int = None,
expected_error: bool = False, expected_result: str = "started_training"):
train_req = aiengine_pb2.StartTrainingRequest(
pod=pod_name,
number_episodes=number_episodes,
flight=flight,
epoch_time=epoch_time,
learning_algorithm=self.ALGORITHM,
training_data_dir=self.temp_dir)
resp = self.aiengine.StartTraining(train_req, None)
self.assertEqual(resp.error, expected_error)
self.assertEqual(resp.result, expected_result)
def wait_for_training(self):
self.assertIsNotNone(main.Dispatch.TRAINING_THREAD)
main.Dispatch.TRAINING_THREAD.join()
def inference(self, pod_name: str, tag: str, assertion_on_response=None):
resp = self.aiengine.GetInference(
aiengine_pb2.InferenceRequest(pod=pod_name, tag=tag), None
)
self.assertFalse(resp.response.error)
self.assertEqual(resp.tag, tag)
if assertion_on_response is not None:
assertion_on_response(resp)
def validate_episode_data(
self, pod_name, flight, number_episodes, num_actions, episode_results
):
self.assertEqual(len(episode_results), number_episodes)
index = episode_results[0]["episode_data"]["episode"]
for episode_result in episode_results:
episode_data = episode_result["episode_data"]
self.assertEqual(
episode_result["request_url"],
f"http://localhost:8000/api/v0.1/pods/{pod_name}/training_runs/{flight}/episodes",
)
self.assertEqual(episode_data["episode"], index)
self.assertTrue(episode_data["start"])
self.assertTrue(episode_data["end"])
self.assertTrue(episode_data["score"])
actions_count = 0
for action_name in episode_data["actions_taken"]:
actions_count += episode_data["actions_taken"][action_name]
self.assertEqual(actions_count, num_actions)
index += 1
def test_train_inference_loop(self):
# Step 1, init the pod
self.init(self.trader_init_req)
# Step 2, load the csv data
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
# Step 3, train
self.start_training("trader", flight, number_episodes)
self.wait_for_training()
# Step 4, inference
self.inference(
"trader",
"latest",
lambda response: self.assertNotEqual(response.confidence, 0.0),
)
# Validate the episode data
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=50,
episode_results=self.episode_results,
)
def test_train_inference_loop_train_different_epoch(self):
# Step 1, init the pod
self.init(self.trader_init_req)
# Step 2, load the csv data
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
# Step 3, train
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
# Step 4, inference
self.inference(
"trader",
"latest",
lambda response: self.assertNotEqual(response.confidence, 0.0),
)
# Validate the episode data
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=49,
episode_results=self.episode_results,
)
def test_train_gap_in_data(self):
with open("./tests/assets/csv/training_loop_gap_0.csv", "r", encoding="utf8") as data:
gap_data_0 = data.read()
with open("./tests/assets/csv/training_loop_gap_1.csv", "r", encoding="utf-8") as data:
gap_data_1 = data.read()
self.init(self.trader_init_req)
self.add_data("trader", gap_data_0)
self.add_data("trader", gap_data_1)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes)
self.wait_for_training()
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=50,
episode_results=self.episode_results,
)
gap_start = pd.to_datetime(1626697640, unit="s")
gap_end = pd.to_datetime(1626697860, unit="s")
table = main.data_managers["trader"].massive_table_sparse
filled_table = main.data_managers["trader"]._fill_table(table) # pylint: disable=protected-access
price = list(filled_table[gap_start:gap_start].coinbase_btcusd_close)[-1]
# Validate the forward filling is working.
current_time = gap_start
while current_time < gap_end:
next_price = list(filled_table[current_time:current_time].coinbase_btcusd_close)[
-1
]
self.assertEqual(price, next_price)
price = next_price
current_time += pd.to_timedelta(self.trader_init_req.granularity, unit="s")
def test_data_added_after_training_starts(self):
with open("./tests/assets/csv/training_loop_gap_0.csv", "r", encoding="utf-8") as data:
gap_data_0 = data.read()
with open("./tests/assets/csv/training_loop_gap_1.csv", "r", encoding="utf-8") as data:
gap_data_1 = data.read()
self.init(self.trader_init_req)
self.add_data("trader", gap_data_0)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes)
post_data_lock = threading.Lock()
episode_5_lock = threading.Lock()
episode_5_lock.acquire() # pylint: disable=consider-using-with
def release_lock_on_episode_5(episode: int):
if episode == 5 and episode_5_lock.locked():
episode_5_lock.release()
post_data_lock.acquire() # pylint: disable=consider-using-with
train.end_of_episode = release_lock_on_episode_5
# wait for episode 5
post_data_lock.acquire() # pylint: disable=consider-using-with
episode_5_lock.acquire() # pylint: disable=consider-using-with
print("Posting gap_data_1")
self.add_data("trader", gap_data_1)
post_data_lock.release()
self.wait_for_training()
episode_5_lock.release()
post_data_lock.release()
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=10,
episode_results=self.episode_results,
)
def test_epoch_earlier_than_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training("trader", "1", 10, 1626697400, expected_error=True, expected_result="epoch_time_invalid")
def test_epoch_offset_from_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training("trader", "1", 1, 1626697485, expected_error=False, expected_result="started_training")
self.wait_for_training()
def test_epoch_after_latest_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training(
"trader", "1", 10, 1626699240, expected_error=True, expected_result="not_enough_data_for_training")
def test_not_enough_data_for_training_no_data(self):
self.init(self.trader_init_req)
self.start_training("trader", "1", 10, expected_error=True, expected_result="not_enough_data_for_training")
def test_not_enough_data_for_training_late_epoch(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training(
"trader", "1", 10, epoch_time=1626698020,
expected_error=True, expected_result="not_enough_data_for_training")
def test_invalid_reward_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions["buy"] = "foo"
self.init(
trader_init,
expected_error=True,
expected_result="invalid_reward_function",
)
def test_no_rewards_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions.clear()
self.init(
trader_init,
expected_error=True,
expected_result="missing_actions",
)
def test_no_fields_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.fields.clear()
self.init(
trader_init,
expected_error=True,
expected_result="missing_fields",
)
def test_invalid_reward_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions["buy"] = "reward = foo"
trader_init.actions["sell"] = "reward = foo"
trader_init.actions["hold"] = "reward = foo"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_reward_function")
self.assertEqual(
error_data["error_message"], """NameError("name 'foo' is not defined")"""
)
def test_unsafe_reward_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
unsafe_action = "open('/tmp/FILE','w').write('this is unsafe!'); reward = 1"
trader_init.actions["buy"] = unsafe_action
trader_init.actions["sell"] = unsafe_action
trader_init.actions["hold"] = unsafe_action
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_reward_function")
self.assertEqual(
error_data["error_message"], """NameError("name 'open' is not defined")"""
)
def test_invalid_law_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.laws[0] = "can I do this?"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_law_expression")
self.assertEqual(
error_data["error_message"],
"""SyntaxError('invalid syntax', ('<string>', 1, 5, 'can I do this?'))""",
)
def test_invalid_datasource_action_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.datasources[0].actions[
"buy"
] = "local_portfolio_usd_balance1 -= coinbase_btcusd_close\nlocal_portfolio_btc_balance += 1"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_datasource_action_expression")
self.assertEqual(
error_data["error_message"],
"""NameError("name 'local_portfolio_usd_balance1' is not defined")""",
)
def test_epoch_is_inferred_if_absent(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.epoch_time = 0
trader_init.period = 120
self.init(trader_init)
now_unix_seconds = (
pd.Timestamp.now() - | pd.Timestamp("1970-01-01") | pandas.Timestamp |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_data.ipynb (unless otherwise specified).
__all__ = ['DATA_PATH', 'acquire_data', 'rmtree', 'load_custom_data', 'load_data', 'pad_trajectories',
'normalize_trajectory', 'get_custom_dls', 'get_discriminative_dls', 'get_turning_point_dls', 'get_1vall_dls',
'get_validation_dl', 'get_dls_from_ds', 'create_custom_dataset', 'cut_trajectory', 'validate_model',
'validate_task']
# Cell
from pathlib import Path
import urllib.request as u_request
from zipfile import ZipFile
import csv
import pandas as pd
from andi import andi_datasets, normalize
import numpy as np
from fastai.text.all import *
# Cell
DATA_PATH = Path("../data")
# Cell
def acquire_data(train=True, val=True):
"""Obtains the train and validation datasets of the competition.
The train URL might fail; if so, get the data from https://drive.google.com/drive/folders/1RXziMCO4Y0Fmpm5bmjcpy-Genhzv4QJ4"""
DATA_PATH.mkdir(exist_ok=True)
train_url = ("https://doc-4k-88-drive-data-export.googleusercontent.com/download/qh9kfuk2n3khcj0qvrn9t3a4j19nve1a/" +
"rqpd3tajosn0gta5f9mmbbb1e4u8csnn/1599642000000/17390da5-4567-4189-8a62-1749e1b19b06/108540842544374891611/" +
"ADt3v-N9HwRAxXINIFMKGcsrjzMlrvhOOYitRyphFom1Ma-CUUekLTkDp75fOegXlyeVVrTPjlnqDaK0g6iI7eDL9YJw91-" +
"jiityR3iTfrysZP6hpGA62c4lkZbjGp_NJL-XSDUlPcwiVi5Hd5rFtH1YYP0tiiFCoJZsTT4akE8fjdrkZU7vaqFznxuyQDA8YGaiuYlKu" +
"-F1HiAc9kG_k9EMgkMncNflNJtlugxH5pFcNDdrYiOzIINRIRivt5ScquQ_s4KyuV-zYOQ_g2_VYri8YAg0IqbBrcO-exlp5j-" +
"t02GDh5JZKU3Hky5b70Z8brCL5lvK0SFAFIKOer45ZrFaACA3HGRNJg==?authuser=0&nonce=k5g7m53pp3cqq&user=" +
"108540842544374891611&hash=m7kmrh87gmekjhrdcpbhuf1kj13ui0l2")
val_url = ("https://competitions.codalab.org/my/datasets/download/7ea12913-dfcf-4a50-9f5d-8bf9666e9bb4")
if train:
data = _download_bytes(train_url)
_write_bytes(data, DATA_PATH)
train_path = DATA_PATH/"Development dataset for Training"
train_path.rename(train_path.parent/"train")
if val:
data = _download_bytes(val_url)
_write_bytes(data, DATA_PATH)
val_path = DATA_PATH/"validation_for_scoring"
val_path.rename(val_path.parent/"val")
rmtree(DATA_PATH/"__MACOSX")
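# Hypothetical usage sketch (not part of the original module): download only the
# validation set into ../data; the train URL above may be stale, as the docstring notes.
def _example_download_validation_only():
    acquire_data(train=False, val=True)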
def _download_bytes(url):
"Downloads data from `url` as bytes"
u = u_request.urlopen(url)
data = u.read()
u.close()
return data
def _write_bytes(data, path):
"Saves `data` (bytes) into path."
zip_path = _zip_bytes(data)
_unzip_file(zip_path, new_path=path)
def _zip_bytes(data, path=None):
"Saves bytes data as .zip in `path`."
if path is None: path = Path("../temp")
zip_path = path.with_suffix(".zip")
with open(zip_path, "wb") as f:
f.write(data)
return zip_path
def _unzip_file(file_path, new_path=None, purge=True):
"Unzips file in `file_path` to `new_path`."
if new_path is None: new_path = file_path.with_suffix("")
zip_path = file_path.with_suffix(".zip")
with ZipFile(zip_path, 'r') as f:
f.extractall(new_path)
if purge: zip_path.unlink()
def rmtree(root):
for p in root.iterdir():
if p.is_dir(): rmtree(p)
else: p.unlink()
root.rmdir()
# Cell
def load_custom_data(dim=1, models=None, exps=None, path=None):
"Loads data from custom dataset."
path = DATA_PATH/f"custom{dim}.pkl" if path is None else path
df = pd.read_pickle(path)
mod_mask = sum([df['model'] == m for m in models]) if models is not None else np.ones(df.shape[0], dtype=bool)
exp_mask = sum([df['exp'] == e for e in exps]) if exps is not None else np.ones(df.shape[0], dtype=bool)
mask = mod_mask & exp_mask
return df[mask].reset_index(drop=True)
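# Hypothetical usage sketch (not part of the original module): the model and exponent
# values are assumptions; it expects ../data/custom2.pkl with 'model' and 'exp' columns.
def _example_load_custom_subset():
    # Keep only trajectories of models 1 and 3 with anomalous exponents 0.5 or 1.0.
    return load_custom_data(dim=2, models=[1, 3], exps=[0.5, 1.0])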
def load_data(task, dim=1, ds='train'):
"Loads 'train' or 'val' data of corresponding dimension."
path = DATA_PATH/ds
try:
df = | pd.read_pickle(path/f"task{task}.pkl") | pandas.read_pickle |
import numpy as np
import os
import pandas
import tables
from functools import partial
from multiprocessing.dummy import Pool
from util.audio import audiofile_to_input_vector
from util.text import text_to_char_array
def pmap(fun, iterable):
pool = Pool()
results = pool.map(fun, iterable)
pool.close()
return results
def process_single_file(row, numcep, numcontext, alphabet):
# row = index, Series
_, file = row
features = audiofile_to_input_vector(file.wav_filename, numcep, numcontext)
features_len = len(features) - 2*numcontext
transcript = text_to_char_array(file.transcript, alphabet)
if features_len < len(transcript):
raise ValueError('Error: Audio file {} is too short for transcription.'.format(file.wav_filename))
return features, features_len, transcript, len(transcript)
# load samples from CSV, compute features, optionally cache results on disk
def preprocess(csv_files, batch_size, numcep, numcontext, alphabet, hdf5_cache_path=None):
COLUMNS = ('features', 'features_len', 'transcript', 'transcript_len')
print('Preprocessing', csv_files)
if hdf5_cache_path and os.path.exists(hdf5_cache_path):
with tables.open_file(hdf5_cache_path, 'r') as file:
features = file.root.features[:]
features_len = file.root.features_len[:]
transcript = file.root.transcript[:]
transcript_len = file.root.transcript_len[:]
# features are stored flattened, so reshape into
# [n_steps, (n_input + 2*n_context*n_input)]
for i in range(len(features)):
features[i] = np.reshape(features[i], [features_len[i], -1])
in_data = list(zip(features, features_len,
transcript, transcript_len))
print('Loaded from cache at', hdf5_cache_path)
        return pandas.DataFrame(data=in_data, columns=COLUMNS)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description : This code does basic statistical tests (i.e., Student's t-test, fold change,
              Benjamini-Hochberg false discovery rate adjustment) for the peak table generated
              by MZmine-2.53
Copyright : (c) LemasLab, 02/23/2020
Author : <NAME>
License : MIT License
Maintainer : <EMAIL>, <EMAIL>, <EMAIL>
Usage : python add_stats.py -i $input_peak_table
-d $design_file_location
-o $output_peak_table
-l $library_location
"""
import warnings
import logging
import logging.handlers
import pandas as pd
import numpy as np
from statsmodels.stats.multitest import multipletests
from scipy import stats
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s]: %(levelname)s: %(message)s')
warnings.filterwarnings('ignore')
def add_threshold(row, names):
"""Add threshold for blank subtraction algorithm.
# Arguments:
row: certain row of peak table (pandas dataframe).
names: column names in the peak table of a certain group of samples
# Returns:
threshold value
"""
value = np.mean(row[names]) + 3*np.std(row[names])
return value if value > 0 else 5000
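# Hedged example of add_threshold on a hand-built row; 'blank1'..'blank3' are
# hypothetical blank-sample columns, not names from the real peak table.
import pandas as pd
_row_thr = pd.Series({'blank1': 100.0, 'blank2': 120.0, 'blank3': 80.0})
_thr = add_threshold(_row_thr, ['blank1', 'blank2', 'blank3'])  # mean + 3*std of the blanks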
def blank_subtraction_flag(row, name_group, name_threshold, bar):
"""Blank subtraction function.
Blank subtraction algorithm:
    - Calculate mean (mean_blank) and standard deviation (sd_blank)
      of peak intensities in blank samples.
    - Threshold ← mean_blank+3*sd_blank
    - If threshold <=0, then replace it with 5,000 (why 5,000?)
    - Calculate mean peak intensity in fat (mean_fat), whole (mean_whole)
      and skim (mean_skim) samples.
- ratio_fat ← (mean_fat-threshold)/threshold;
ratio_whole ← (mean_whole-threshold)/threshold;
ratio_skim ← (mean_skim-threshold)/threshold
- If ratio_fat<self_defined_number (e.g. 100) and
ratio_whole<self_defined_number and ratio_skim<self_defined_number,
then drop the peak.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
name_threshold: name of the threshold column.
bar: bar value of blank subtraction algorithm.
# Returns:
        Whether a certain peak of this group still exists after blank subtraction
"""
return (np.mean(row[name_group]) - row[name_threshold])/row[name_threshold] > bar
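# Hedged usage sketch of blank_subtraction_flag; 'fat1'/'fat2'/'threshold' are
# invented column names and bar=100 mirrors the cutoff mentioned in the docstring.
import pandas as pd
_row_bs = pd.Series({'fat1': 2.0e6, 'fat2': 1.8e6, 'threshold': 1.5e4})
_keep = blank_subtraction_flag(_row_bs, ['fat1', 'fat2'], 'threshold', bar=100)
# (1.9e6 - 1.5e4) / 1.5e4 ~ 125.7 > 100, so the peak is kept (True)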
# Judge whether certain peak intensity of a sample is 0 or not
def zero_intensity_flag(row, name_group):
"""Check if the mean intensity of certain group of samples is zero. If zero, then
the metabolite is not existed in that material.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
# Returns:
True (the mean intensity is zero) or False (the mean intensity is not zero).
"""
return np.mean(row[name_group]) <= 0
# Add p-value for student t-test between two groups of samples
def add_pvalue(row, left_names, right_names):
"""Add p value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
p value of student t-test
"""
_, p = stats.ttest_ind(row[left_names], row[right_names])
return p
# Add t-value for student t-test between two groups of samples
def add_tvalue(row, left_names, right_names):
"""Add t value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
t value of student t-test
"""
t, _ = stats.ttest_ind(row[left_names], row[right_names])
return t
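# Hedged example of the two t-test helpers on a synthetic row; 'a1'..'b3' are
# made-up sample columns with clearly separated groups.
import pandas as pd
_row_tt = pd.Series({'a1': 10.0, 'a2': 11.0, 'a3': 9.0, 'b1': 20.0, 'b2': 19.0, 'b3': 21.0})
_t = add_tvalue(_row_tt, ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'])  # strongly negative
_p = add_pvalue(_row_tt, ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'])  # very small p value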
# Add fold-change for the mean values of two groups of samples
def fold_change(row, left, right):
"""Add fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
left: column name in the peak table of the mean intensity of first group of samples.
right: column name in the peak table of the mean intensity of second group of samples.
# Returns:
fold change value.
"""
if row[right] == 0:
return np.inf
if row[left] == 0:
return -np.inf
result = row[left]/row[right]
return result if result >= 1 else -1/result
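# Hedged example of the signed fold-change convention above; 'mean_g1'/'mean_g2'
# are hypothetical mean-intensity columns.
import pandas as pd
_row_fc = pd.Series({'mean_g1': 50.0, 'mean_g2': 200.0})
_fc_down = fold_change(_row_fc, 'mean_g1', 'mean_g2')  # 0.25 < 1 is reported as -4.0
_fc_up = fold_change(_row_fc, 'mean_g2', 'mean_g1')    # 4.0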
# Absolute value of fold-change
def abs_fold_change(row, fold_change_column):
"""Add absolute fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
fold_change_column: column name in the peak table of the fold change value.
# Returns:
absolute fold change value.
"""
return abs(row[fold_change_column])
# Add ppm value for identified metabolites.
## The library search result produced by MZmine may exceed 5 ppm,
## so those beyond 5 ppm should be filtered out
def add_ppm(row, library_df):
"""Add part per million (ppm) value for library matching. The library matching done by
MZmine may not follow the threshold strictly (i.e., when setting the ppm to 5, some
metabolites with ppm of more than 5 may also appear in the peak table).
# Arguments:
row: certain row of peak table (pandas dataframe).
library_df: library dataframe.
# Returns:
ppm value of the matched metabolite in the row.
"""
if pd.isnull(row['row identity (main ID)']):
return None
mzs = list(library_df[library_df.Name.str.strip() == row['row identity (main ID)']]['M/Z'])
mz_observe = row["row m/z"]
diff = []
for mz in mzs:
diff.append(abs(mz_observe - mz))
if len(diff) == 0:
return None
mz_theoretical = mzs[diff.index(min(diff))]
return abs((mz_observe-mz_theoretical)*10e5/mz_theoretical)
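# Hedged example of add_ppm with a two-row toy library; metabolite names and
# masses are invented for illustration only.
import pandas as pd
_lib = pd.DataFrame({'Name': ['MetA', 'MetB'], 'M/Z': [180.0634, 204.0899]})
_row_ppm = pd.Series({'row identity (main ID)': 'MetA', 'row m/z': 180.0640})
_ppm = add_ppm(_row_ppm, _lib)  # |180.0640 - 180.0634| * 1e6 / 180.0634 ~ 3.3 ppm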
def add_label(row, group1_name, group2_name):
"""Add label for metabolite represented by the row.
Format: "m_z/retention_time/fold_change".
# Arguments:
row: certain row of peak table (pandas dataframe).
group1_name: name of the group of first group of samples.
group2_name: name of the group of second group of samples.
# Returns:
label (string type).
"""
if pd.isnull(row["row identity (main ID)"]) or \
row["row identity (main ID)"] == "nan" or \
row["row identity (main ID)"] == None:
return str(round(row["row m/z"], 2)) + "/" + \
str(round(row["row retention time"], 2)) + \
"/" + str(round(row["fold_change" + \
"(" + str(group1_name) + " versus " + \
str(group2_name) + ")"], 2))
return str(row["row identity (main ID)"]) + "/" + \
str(round(row["fold_change" + "(" + str(group1_name) + \
" versus " + str(group2_name) + ")"], 2))
def add_stats(data_file="data_pos_ph.csv", design_file="design", \
output_file="pos_withstats.csv", \
library="positive_library.csv"):
"""Add basic statistics to peak table produced by MZmine.
# Arguments:
data_file: peak table.
design_file: design file corresponding to the peak table.
output_file: the name of processed file.
library: location and file name of the library used to identify metabolites.
# Returns:
list of identified metabolites.
# Outputs:
        processed peak table
"""
data = pd.read_csv(data_file)
data["row identity (main ID)"] = data["row identity (main ID)"].apply(str)
data = data[~(data["row identity (main ID)"].str.contains("adduct|Complex", \
na=False, regex=True))]
data["number of comparisons"] = len(data)
    data_library = pd.read_csv(library)
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 19:20:05 2016
@author: ryandrewjones
"""
import sys
import signal
import click
import os
import cPickle as pickle
import energyPATHWAYS.config as cfg
import energyPATHWAYS.util as util
from energyPATHWAYS.pathways_model import PathwaysModel
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd
# set up a dummy model
path = os.getcwd()
config = 'config.INI'
scenario_id = 1
cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)
demand = model.demand
demand.add_drivers()
existing_geo_map_key_ids, existing_geo_map_key_names = zip(*util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids)+1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id'))+1
###############################################
# user inputs
driver_ids_to_make_map_keys = [
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61]
basis_year_for_map_key = int(cfg.cfgfile.get('case', 'current_year'))
###############################################
# make our new map keys
GeographyMapKeys = [['id', 'name']]
GeographyMap_columns = ['intersection_id', 'geography_map_key_id', 'value', 'id']
GeographyMap = []
for driver_id in driver_ids_to_make_map_keys:
driver = demand.drivers[driver_id]
demand.remap_driver(driver) # remaps to our new super detailed geography
values = util.df_slice(driver.values, basis_year_for_map_key, 'year')
if values.index.nlevels>1:
levels_to_remove = [n for n in values.index.names if n!='intersection_id']
values = util.remove_df_levels(values, levels_to_remove)
new_key_name = driver.name
if new_key_name in existing_geo_map_key_names:
raise ValueError('driver name {} is already in the existing map keys, please rename driver id {}'.format(driver.name, driver.id))
GeographyMapKeys.append([next_map_key_id, new_key_name])
values = values.reset_index()
values['id'] = range(next_geo_map_id, next_geo_map_id+len(values))
values['geography_map_key_id'] = next_map_key_id
GeographyMap.append(values)
next_geo_map_id += len(values)
next_map_key_id+=1
output = pd.concat(GeographyMap)
# import dependencies
import argparse
import os
import numpy as np
import pandas as pd
import sklearn
from sklearn.utils import shuffle
import torch
from torch.utils.data import (DataLoader, RandomSampler, TensorDataset)
import transformers
from transformers import (AutoTokenizer, AutoModelForSequenceClassification)
import datasets
from datasets import load_dataset
from msp_eval import *
if __name__ == '__main__':
# create argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--roberta_version', type=str, default='roberta-large', help='Version of RoBERTa to use')
parser.add_argument('--dataset', help='Dataset to compute maximum softmax probability')
parser.add_argument('--load_dir', type=str, default='output', help='Directory to load tokenizers and models')
parser.add_argument('--output_dir', type=str, default='msp', help='Directory to save numpy array in')
parser.add_argument('--seed', type=int, default=42, help='Random seed for initialization')
parser.add_argument('--file_format', type=str, default='.tsv', help='Data file format for dataset not available for download at HuggingFace Datasets')
parser.add_argument('--in_domain', action='store_true', help='Dataset is in-domain')
parser.add_argument('--entailment', action='store_true', help='Dataset originally intended for an entailment task')
parser.add_argument('--split', type=str, default='eval', help='Dataset split to compute maximum softmax probability on')
args = parser.parse_args()
# huggingface and glue datasets
hf_datasets = ['imdb', 'rte', 'snli', 'sst2']
glue = ['rte', 'sst2']
# custom dataset label keys
label_keys = {
'counterfactual-imdb': 'Sentiment',
}
# training split keys
train_split_keys = {
'imdb': 'train',
'rte': 'train',
'snli': 'train',
'sst2': 'train',
'counterfactual-imdb': 'train'
}
# evaluation split keys
eval_split_keys = {
'imdb': 'test',
'rte': 'validation',
'snli': 'validation',
'sst2': 'validation',
'counterfactual-imdb': 'dev'
}
# test split keys
test_split_keys = {
'imdb': 'unsupervised',
'rte': 'test',
'snli': 'test',
'sst2': 'test',
'counterfactual-imdb': 'test'
}
# dataset feature keys
datasets_to_keys = {
'imdb': ('text', None),
'rte': ('sentence1', 'sentence2'),
'snli': ('premise', 'hypothesis'),
'sst2': ('sentence', None),
'counterfactual-imdb': ('Text', None)
}
# load dataset
print('Loading dataset')
if args.dataset in hf_datasets:
dataset = load_dataset(args.dataset) if args.dataset not in glue else load_dataset('glue', args.dataset)
if args.split == 'train':
dataset = dataset[train_split_keys[args.dataset]]
elif args.split == 'eval':
dataset = dataset[eval_split_keys[args.dataset]]
elif args.split == 'test':
dataset = dataset[test_split_keys[args.dataset]]
elif args.file_format == '.tsv':
train_df = pd.read_table(os.path.join(os.getcwd(), args.dataset, (train_split_keys[args.dataset] + args.file_format)))
eval_df = pd.read_table(os.path.join(os.getcwd(), args.dataset, (eval_split_keys[args.dataset] + args.file_format)))
test_df = pd.read_table(os.path.join(os.getcwd(), args.dataset, (test_split_keys[args.dataset] + args.file_format)))
        num_labels = len(np.unique(pd.Categorical(train_df[label_keys[args.dataset]], ordered=True)))
# -- coding: utf-8 --
import tensorflow as tf
import pandas as pd
import numpy as np
import csv
file_path=r'/Users/guojianzou/PycharmProjects/OD/data/Order_all.csv'
save_path=r'/Users/guojianzou/PycharmProjects/OD/data/data_all.csv'
train_path=r'/Users/guojianzou/PycharmProjects/OD/data/train_data.csv'
combine_path=r'/Users/guojianzou/PycharmProjects/OD/data/combine_data.csv'
data_colum=["ZoneID","Area","Slon","Slat","Elon","Elat","day","hour","min","second"]
def data_save(file_path, save_path):
    '''
    Read the raw order CSV at `file_path` and write the cleaned rows to `save_path`.
    :param file_path: path of the raw order CSV
    :param save_path: path of the cleaned output CSV
    :return: None (writes a CSV file)
    '''
data = pd.read_csv(file_path, encoding='utf-8')
data=data.values
file = open(save_path, 'w', encoding='utf-8')
writer=csv.writer(file)
writer.writerow(data_colum)
for line in data:
# line = char.split(',')
data_line=[int(line[1])]+[float(ch) for ch in line[2:7]]+[int(line[11]),int(line[12])]+[int(line[9][14:16]),int(line[9][17:19])]
writer.writerow(data_line)
file.close()
print('data_save finish')
def train_data(save_path,train_path):
train_colum = ["ZoneID", "day", "hour", "min","label"]
file = open(train_path, 'w', encoding='utf-8')
writer=csv.writer(file)
writer.writerow(train_colum)
data = pd.read_csv(save_path, encoding='utf-8')
for d in range(1,31):
data1=data.loc[data['day'] == d]
if data1.values.shape[0]==0:
print('day empty')
continue
for h in range(0,24):
data2 = data1.loc[data1['hour'] == h]
if data2.values.shape[0] == 0:
print('hour empty')
continue
for m in range(0,60):
data3 = data2.loc[data2['min'] == m]
if data3.values.shape[0] == 0:
print('min empty')
continue
for id in range(162):
data4 = data3.loc[data3['ZoneID'] == id]
if data4.values.shape[0] == 0:
print('zone empty')
continue
line=[id,d,h,m,data4.values.shape[0]]
writer.writerow(line)
file.close()
print('train_data finish!!!!')
return
def data_combine(train_path, combine_path):
train_colum = ["ZoneID", "day", "hour", "min-15","label"]
file = open(combine_path, 'w', encoding='utf-8')
writer=csv.writer(file)
writer.writerow(train_colum)
data = pd.read_csv(train_path, encoding='utf-8')
for d in range(1,31):
data1=data.loc[data['day'] == d]
if data1.values.shape[0]==0:
print(d,' day empty')
continue
for h in range(0,24):
data2 = data1.loc[data1['hour'] == h]
if data2.values.shape[0] == 0:
print(d,h,' hour empty')
continue
for i in range(4):
for id in range(162):
data3 = data2.loc[data2['ZoneID'] == id]
if data3.values.shape[0] == 0:
print(d, h, (i + 1) * 15, id,' zone empty')
line = [id, d, h, (i + 1) * 15, 0]
writer.writerow(line)
continue
sum_ = sum([data3.loc[(data['min'] == (j + i * 15))].values.shape[0] for j in range(10)])
line=[id,d,h,(i+1)*15,sum_]
writer.writerow(line)
file.close()
print('data_combine finish!!!!')
return
# data=data_save(file_path,save_path)
#
# train_data(save_path,train_path)
#
# data_combine(train_path, combine_path)
def sudden_changed(city_dictionary_):
'''
    Handle sudden jumps in the values: a spike more than 200 above the previous
    row is replaced by that previous value.
    Args:
        city_dictionary_: dict mapping zone id to a DataFrame of counts
    Returns:
'''
if city_dictionary_:
for key in city_dictionary_.keys():
dataFrame=city_dictionary_[key].values
shape=city_dictionary_[key].shape
for i in range(shape[0]):
for j in range(shape[1]):
if i!=0:
if dataFrame[i][j]-dataFrame[i-1][j]>200:
dataFrame[i][j] = dataFrame[i - 1][j]
            city_dictionary_[key] = pd.DataFrame(dataFrame)
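# Hedged example of sudden_changed on a one-key dictionary; the column name and
# counts are invented, while the >200 jump threshold is the one hard-coded above.
import pandas as pd
_demo_dict = {'zone0': pd.DataFrame({'flow': [10, 12, 500, 11]})}
sudden_changed(_demo_dict)  # the 500 spike (jump > 200) is replaced by the previous 12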
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
----------
platform
'omni'
name
'hro'
tag
Select time between samples, one of {'1min', '5min'}
sat_id
None supported
Note
----
Files are stored by the first day of each month. When downloading use
omni.download(start, stop, freq='MS') to only download days that could possibly
have data. 'MS' gives a monthly start frequency.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine. Though the CDAWEB description indicates that
these level-2 products are expected to be ok.
- Module not written by OMNI team.
Custom Functions
-----------------
time_shift_to_magnetic_poles
Shift time from bowshock to intersection with one of the magnetic poles
calculate_clock_angle
Calculate the clock angle and IMF mag in the YZ plane
calculate_imf_steadiness
Calculate the IMF steadiness using clock angle and magnitude in the YZ plane
calculate_dayside_reconnection
Calculate the dayside reconnection rate
"""
from __future__ import print_function
from __future__ import absolute_import
import functools
import logging
import numpy as np
import pandas as pds
import pysat
from pysat.instruments.methods import nasa_cdaweb as cdw
from pysat.instruments.methods import general as mm_gen
logger = logging.getLogger(__name__)
platform = 'omni'
name = 'hro'
tags = {'1min': '1-minute time averaged data',
'5min': '5-minute time averaged data'}
sat_ids = {'': ['5min']}
_test_dates = {'': {'1min': pysat.datetime(2009, 1, 1),
'5min': pysat.datetime(2009, 1, 1)}}
# support list files routine
# use the default CDAWeb method
fname1 = 'omni_hro_1min_{year:4d}{month:02d}{day:02d}_v01.cdf'
fname5 = 'omni_hro_5min_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'': {'1min': fname1,
'5min': fname5}}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags,
fake_daily_files_from_monthly=True)
# support load routine
# use the default CDAWeb method
load = functools.partial(cdw.load, fake_daily_files_from_monthly=True)
# support download routine
# use the default CDAWeb method
basic_tag1 = {'dir': '/pub/data/omni/omni_cdaweb/hro_1min',
'remote_fname': '{year:4d}/' + fname1,
'local_fname': fname1}
basic_tag5 = {'dir': '/pub/data/omni/omni_cdaweb/hro_5min',
'remote_fname': '{year:4d}/' + fname5,
'local_fname': fname5}
supported_tags = {'': {'1min': basic_tag1,
'5min': basic_tag5}}
download = functools.partial(cdw.download,
supported_tags,
fake_daily_files_from_monthly=True)
# support listing files currently on CDAWeb
list_remote_files = functools.partial(cdw.list_remote_files,
supported_tags=supported_tags)
def clean(omni):
for fill_attr in ["fillval", "fill"]:
# case insensitive check for attribute name
if omni.meta.has_attr(fill_attr):
# get real name
fill_attr = omni.meta.attr_case_name(fill_attr)
for key in omni.data.columns:
if key != 'Epoch':
idx, = np.where(omni[key] == omni.meta[key, fill_attr])
omni[idx, key] = np.nan
return
def time_shift_to_magnetic_poles(inst):
""" OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
-----------
inst : Instrument class object
Instrument with OMNI HRO data
Notes
---------
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk.
"""
# need to fill in Vx to get an estimate of what is going on
inst['Vx'] = inst['Vx'].interpolate('nearest')
inst['Vx'] = inst['Vx'].fillna(method='backfill')
inst['Vx'] = inst['Vx'].fillna(method='pad')
inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')
# make sure there are no gaps larger than a minute
inst.data = inst.data.resample('1T').interpolate('time')
time_x = inst['BSN_x']*6371.2/-inst['Vx']
idx, = np.where(np.isnan(time_x))
if len(idx) > 0:
logger.info(time_x[idx])
logger.info(time_x)
time_x_offset = [pds.DateOffset(seconds=time)
for time in time_x.astype(int)]
new_index = []
for i, time in enumerate(time_x_offset):
new_index.append(inst.data.index[i] + time)
inst.data.index = new_index
inst.data = inst.data.sort_index()
return
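# Back-of-the-envelope sketch of the time shift computed above, outside the
# Instrument machinery; the BSN distance and Vx below are illustrative values only.
_bsn_x_re = 13.0                             # bow-shock-nose distance in Earth radii
_vx_kms = -400.0                             # typical solar wind Vx (negative, earthward)
_shift_s = _bsn_x_re * 6371.2 / -_vx_kms     # ~207 s of extra propagation time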
def calculate_clock_angle(inst):
""" Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
"""
# Calculate clock angle in degrees
clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))
clock_angle[clock_angle < 0.0] += 360.0
inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)
# Calculate magnitude of IMF in Y-Z plane
inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 +
inst['BZ_GSM']**2),
index=inst.data.index)
return
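# Standalone numeric check of the clock-angle formula above: for BY_GSM = 5 nT
# and BZ_GSM = -5 nT the angle is 135 degrees and |B_YZ| is about 7.07 nT.
import numpy as np
_ca_deg = np.degrees(np.arctan2(5.0, -5.0))            # 135.0
_ca_deg = _ca_deg + 360.0 if _ca_deg < 0.0 else _ca_deg
_byz_nt = np.sqrt(5.0 ** 2 + (-5.0) ** 2)              # ~7.07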
def calculate_imf_steadiness(inst, steady_window=15, min_window_frac=0.75,
max_clock_angle_std=90.0/np.pi, max_bmag_cv=0.5):
""" Calculate IMF steadiness using clock angle standard deviation and
the coefficient of variation of the IMF magnitude in the GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
steady_window : int
Window for calculating running statistical moments in min (default=15)
min_window_frac : float
Minimum fraction of points in a window for steadiness to be calculated
(default=0.75)
max_clock_angle_std : float
        Maximum standard deviation of the clock angle in degrees (default=90.0/np.pi, i.e. about 28.6)
max_bmag_cv : float
Maximum coefficient of variation of the IMF magnitude in the GSM
Y-Z plane (default=0.5)
"""
from pysat.utils import stats as pystats
# We are not going to interpolate through missing values
sample_rate = int(inst.tag[0])
max_wnum = np.floor(steady_window / sample_rate)
if max_wnum != steady_window / sample_rate:
steady_window = max_wnum * sample_rate
logger.warning("sample rate is not a factor of the statistical window")
logger.warning("new statistical window is {:.1f}".format(steady_window))
min_wnum = int(np.ceil(max_wnum * min_window_frac))
# Calculate the running coefficient of variation of the BYZ magnitude
byz_mean = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).mean()
byz_std = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).std()
inst['BYZ_CV'] = pds.Series(byz_std / byz_mean, index=inst.data.index)
# Calculate the running circular standard deviation of the clock angle
circ_kwargs = {'high': 360.0, 'low': 0.0}
ca_std = \
inst['clock_angle'].rolling(min_periods=min_wnum,
window=steady_window,
center=True).apply(pystats.nan_circstd,
kwargs=circ_kwargs,
raw=True)
inst['clock_angle_std'] = pds.Series(ca_std, index=inst.data.index)
# Determine how long the clock angle and IMF magnitude are steady
imf_steady = np.zeros(shape=inst.data.index.shape)
steady = False
for i, cv in enumerate(inst.data['BYZ_CV']):
if steady:
del_min = int((inst.data.index[i] -
inst.data.index[i-1]).total_seconds() / 60.0)
if np.isnan(cv) or np.isnan(ca_std[i]) or del_min > sample_rate:
# Reset the steadiness flag if fill values are encountered, or
# if an entry is missing
steady = False
if cv <= max_bmag_cv and ca_std[i] <= max_clock_angle_std:
# Steadiness conditions have been met
if steady:
imf_steady[i] = imf_steady[i-1]
imf_steady[i] += sample_rate
steady = True
    inst['IMF_Steady'] = pds.Series(imf_steady, index=inst.data.index)
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, np.nan, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
        f = lambda x: x == pd.Period('2011-03', freq='M')
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 22:24:56 2020
@author: adwait
"""
import matplotlib.pyplot as plt
import gc
import os
import time
import cv2
from PyQt5.QtCore import Qt, pyqtSignal
# from PyQt5.QtGui import QSizePolicy
from PyQt5.QtWidgets import QWidget, QCheckBox, QLabel, QPushButton, QGroupBox,\
QComboBox, QSpinBox, QGridLayout, QDialog, QLineEdit, QDoubleSpinBox,\
QSizePolicy, QFileDialog, QTabWidget, QTableWidgetItem, QTableWidget,\
QListWidget, QAbstractItemView, QTextEdit
from source.summary.summaryanalyze import SummaryAnal
# from source.threads.summplotthread import
import pandas as pd
import logging
class SummaryWindow(QWidget):
def __init__(self):
super().__init__()
# self.setWindowFlags(Qt.Window)
self.setGeometry(100, 100, 500, 650)
self.setWindowTitle("Configure Summary Plots")
self.layout = QGridLayout()
# self.rangeDict = {"Default" : [[0,1],[0,100],[0,100],
# [0,100],[0,100],[0,1]]}
self.paramDict = {}
self.varlist = []
self.dataTransformList = []
self.transformList = []
self.home()
def home(self): #initialise dialog for summary combine
self.summary = None
# self.sumDialog = QDialog(self)
# self.sumDialog.setWindowTitle("Configure Summary Plots")
## self.sumDialog.resize(300, 300)
dataSourceLabel = QLabel("From:", self)
dataSource = QComboBox(self)
dataSource.addItems(['File', 'Folder', 'Filelist sheet'])
self.paramDict['Data source'] = dataSource
dataFormatLabel = QLabel("Format:", self)
dataFormat = QComboBox(self)
dataFormat.addItems(['ASCII', 'Excel'])
self.paramDict['Data format'] = dataFormat
delimLabel = QLabel("Delimiter:", self)
delimText = QComboBox(self)
delimText.addItems(['tab', 'space', 'comma', 'semicolon', 'colon', 'dot',
'pipe', 'double pipe', 'backslash', 'forward slash'])
self.paramDict['Delimiter'] = delimText
headerLabel = QLabel("Header line:", self)
headerLine = QSpinBox(self)
headerLine.setValue(0)
self.paramDict['Header line'] = headerLine
subfolderLabel = QLabel("Sub folder:", self)
self.subfolder = QTextEdit(self)
self.subfolder.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
self.subfolder.setText('/Analysis/Summary/')
self.paramDict['Subfolder'] = self.subfolder
#import data
importButton = QPushButton("Browse..", self)
importButton.clicked.connect(lambda: self.import_data(dataSource))
importButton.setStyleSheet("QPushButton { font-weight: bold;} ")
#create variable
self.variable_dialog_init()
createVar = QPushButton("Create Variable..", self)
createVar.clicked.connect(self.makeVarDialog.show)
#create pivot table
self.pivot_dialog_init()
createPivot = QPushButton("Pivot..", self)
createPivot.clicked.connect(self.pivotDialog.show)
#melt data (reshape)
self.melt_dialog_init()
meltData = QPushButton("Reshape..", self)
meltData.clicked.connect(self.meltDialog.show)
#filter data
self.filter_dialog_init()
filterButton = QPushButton("Filter..", self)
filterButton.clicked.connect(self.filterDialog.show)
transformStepsButton = QPushButton("Steps:", self)
transformStepsButton.clicked.connect(self.show_transform_data)
self.transformSteps = DraggableListWidget(copyItem = True,
delMethod = self.update_transform)
self.transformSteps.setDragDropMode(QAbstractItemView.NoDragDrop)
self.transformSteps.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)
# self.transformSteps.model().rowsRemoved.connect(lambda: self.update_transform)
#TODO: assign widget to VALUES of pivot table for "groupby" function selection
# testBtn = QPushButton('Test')
# self.transformSteps.setItemWidget(self.transformSteps.item(2), testBtn)
self.filenameLabel = QLabel("", self)
self.filenameLabel.setWordWrap(True)
self.paramDict['File name'] = self.filenameLabel
# dataSource.indexChanged.connect(lambda: self.source_changed(dataSource,
# importButton))
#plot data
plotTypeLabel = QLabel("Plot Type", self)
plotType = QComboBox(self)
self.rel_types = ["line", "scatter"]
self.cat_types = ["strip", "swarm", "box", "violin",
"boxen", "point", "bar", "count"]
plotType.addItems(self.rel_types + self.cat_types)
plotType.currentIndexChanged.connect(lambda: self.plot_type_changed(plotType.currentText()))
self.paramDict['Plot type'] = plotType
xLabel = QLabel("X Variable", self)
self.xVar = QComboBox(self)
self.paramDict['X Variable'] = self.xVar
yLabel = QLabel("Y Variable", self)
self.yVar = QComboBox(self)
self.paramDict['Y Variable'] = self.yVar
groupVarLabel = QLabel("<b>Group By:</b>", self)
colorLabel = QLabel("Color", self)
self.colorVar = QComboBox(self)
self.paramDict['Color Parameter'] = self.colorVar
columnLabel = QLabel("Column", self)
self.columnVar = QComboBox(self)
self.paramDict['Column Parameter'] = self.columnVar
rowLabel = QLabel("Row", self)
self.rowVar = QComboBox(self)
self.paramDict['Row Parameter'] = self.rowVar
styleLabel = QLabel("Style", self)
self.styleVar = QComboBox(self)
self.paramDict['Style Parameter'] = self.styleVar
sizeLabel = QLabel("Size", self)
self.sizeVar = QComboBox(self)
self.paramDict['Size Parameter'] = self.sizeVar
plotTitleLabel = QLabel("Title", self)
plotTitle = QLineEdit(self)
plotTitle.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)
# plotTitle.textChanged.connect(lambda: self.update_summary_dict('title',
# plotTitle.text(),
# 0))
plotTitle.setText('Test')
self.paramDict['Title'] = plotTitle
calcStats = QCheckBox('Calculate stats', self)
self.paramDict['Calculate stats'] = calcStats
previewPlot = QCheckBox('Preview plot', self)
self.paramDict['Preview'] = previewPlot
# self.calcStats = QCheckBox('Calculate Stats', self)
#datetime format
datetimeLabel = QLabel("Datetime Format", self)
datetimeFormat = QComboBox(self)
datetimeFormat.addItems(['Date', 'Time', 'Datetime'])
self.paramDict['Datetime format'] = datetimeFormat
#plot format
formatLabel = QLabel("Save as", self)
self.saveFormat = QComboBox(self) #plot save format
self.saveFormat.addItems(['jpg', 'svg', 'pdf', 'png', 'tif', 'tiff'])
# self.paramDict['Save format'] = saveFormat
self.showMarkers = QCheckBox('Show Markers', self)
self.paramDict['Show Markers'] = self.showMarkers
fontScaleLabel = QLabel("Font Scale", self)
fontScale = QDoubleSpinBox(self)
fontScale.setSingleStep(0.1)
# fontScale.setRange(0, 1)
fontScale.setValue(1)
self.paramDict['Font scale'] = fontScale
contextLabel = QLabel("Context", self)
context = QComboBox(self)
context.addItems(['paper', 'notebook', 'talk', 'poster'])
self.paramDict['Plot context'] = context
plotStyleLabel = QLabel("Style", self)
plotStyle = QComboBox(self)
plotStyle.addItems(['ticks', 'darkgrid', 'whitegrid', 'dark', 'white'])
self.paramDict['Plot style'] = plotStyle
colorPaletteLabel = QLabel("Color Palette", self)
colorPalette = QComboBox(self)
colorPalette.addItems(['None', 'deep', 'muted', 'bright',
'pastel', 'dark', 'colorblind',
'Set1', 'Set2', 'Set3', 'Paired',
'Reds', 'Blues','Greens', 'Oranges',
'viridis', 'plasma', 'inferno', 'magma',
'hot', 'afmhot', 'cool',
'hsv', 'gnuplot', 'terrain'])
self.paramDict['Color palette'] = colorPalette
rotateXLabel = QLabel("Rotate X Labels", self)
rotateX = QSpinBox(self)
rotateX.setValue(0)
self.paramDict['X label rotate'] = rotateX
zeroLine = QCheckBox('Zero Line', self)
self.paramDict['Zero line'] = zeroLine
applyDespine = QCheckBox('Despine', self)
self.paramDict['Despine'] = applyDespine
legendSettings = QLabel("<b>Legend Settings:</b>", self)
legendPos = QCheckBox('Outside plot', self)
self.paramDict['Legend outside'] = legendPos
legendLocLabel = QLabel("Location", self)
legendLoc = QComboBox(self)
legendLoc.addItems(['best','upper right','upper left','lower left',
'lower right','right','center left','center right',
'lower center','upper center','center'])
self.paramDict['Legend location'] = legendLoc
legendColLabel = QLabel("Columns", self)
legendCol = QSpinBox(self)
legendCol.setValue(1)
self.paramDict['Legend columns'] = legendCol
# plotFormat.currentIndexChanged.connect(lambda:
# self.update_summary_dict('format',
# plotFormat.currentText(),
# 0))
# fitLabel = QLabel("<b>Fit</b>", self.sumDialog)
# orderLabel = QLabel("<b>Order</b>", self.sumDialog)
# self.summaryDict = {'x var': [None, None, None, None],
# 'y var': [None, None, None, None],
# 'cbar var': [None, None, None, None],
# 'plot num': [None, None, None, None],
# 'fit': [None, None, None, None],
# 'order': [None, None, None, None],
# 'title': [None],
# 'format': [None],
# 'plot type': ["Scatter"]} #initialize
# self.update_summary_dict('format', plotFormat.currentText(), 0)
# self.grouplist = ["Date", "Folder_Name", "Species", "Sex", "Leg", "Pad",
# "Weight", "Temperature", "Humidity", "Medium",
# "Substrate", "Contact_Angle-Water", "Contact_Angle-Hexadecane",
# "Label", "ROI Label","Measurement_Number", "Contact_Time",
# "Detachment Speed", "Attachment Speed", "Sliding Speed",
# "Sliding_Step"]
# self.grouplist.sort()
# ind = groupVar.findText("ROI Label")
# groupVar.setCurrentIndex(groupVar.findText("ROI Label"))
## groupVar.setEnabled(False)
# combine.stateChanged.connect(lambda: self.combine_toggled(groupVar, combine, okButton))
# okButton.clicked.connect(lambda: self.combine_summary_data(combine.isChecked(),
# groupVar.currentText()))
## okButton.setDefault(True)
#buttons
self.showPlot = QPushButton("Show Plot", self)
self.showPlot.clicked.connect(self.show_summary_plots)
self.showPlot.setEnabled(False)
self.savePlot = QPushButton("Save Plot", self)
self.savePlot.clicked.connect(self.export_summary_plots)
self.savePlot.setEnabled(False)
# self.show_data_init() #initialize data dialog
self.dataDialog = PandasTableWidget()
self.statsButton = QPushButton("Show Stats", self)
self.statsButton.clicked.connect(self.show_stats)
self.statsButton.setEnabled(False)
closeButton = QPushButton("Close", self)
closeButton.clicked.connect(self.close)
# gridLayout = QGridLayout(self)
importGroupBox = QGroupBox("Import Data")
importGroupBox.setStyleSheet("QGroupBox { font-weight: bold; } ")
importLayout = QGridLayout()
importGroupBox.setLayout(importLayout)
importLayout.addWidget(dataSourceLabel, 0, 0, 1, 1)
importLayout.addWidget(dataSource, 0, 1, 1, 1)
importLayout.addWidget(dataFormatLabel, 0, 2, 1, 1)
importLayout.addWidget(dataFormat, 0, 3, 1, 1)
importLayout.addWidget(delimLabel, 1, 0, 1, 1)
importLayout.addWidget(delimText, 1, 1, 1, 1)
importLayout.addWidget(headerLabel, 1, 2, 1, 1)
importLayout.addWidget(headerLine, 1, 3, 1, 1)
importLayout.addWidget(subfolderLabel, 2, 0, 1, 1)
importLayout.addWidget(self.subfolder, 2, 1, 1, 3)
importLayout.addWidget(importButton, 3, 0, 1, 1)
importLayout.addWidget(self.filenameLabel, 3, 1, 1, 3)
transformGroupBox = QGroupBox("Transform Data")
transformGroupBox.setStyleSheet("QGroupBox { font-weight: bold; } ")
transformLayout = QGridLayout()
transformGroupBox.setLayout(transformLayout)
transformLayout.addWidget(createVar, 2, 0, 1, 1)
transformLayout.addWidget(createPivot, 3, 0, 1, 1)
transformLayout.addWidget(meltData, 0, 0, 1, 1)
transformLayout.addWidget(filterButton, 1, 0, 1, 1)
transformLayout.addWidget(transformStepsButton, 0, 1, 1, 1)
transformLayout.addWidget(self.transformSteps, 1, 1, 3, 1)
plotGroupBox = QGroupBox("Plot Data")
plotGroupBox.setStyleSheet("QGroupBox { font-weight: bold; } ")
plotLayout = QGridLayout()
plotGroupBox.setLayout(plotLayout)
plotLayout.addWidget(plotTypeLabel, 0, 0, 1, 1)
plotLayout.addWidget(plotType, 0, 1, 1, 1)
plotLayout.addWidget(xLabel, 1, 0, 1, 1)
plotLayout.addWidget(self.xVar, 1, 1, 1, 1)
plotLayout.addWidget(yLabel, 2, 0, 1, 1)
plotLayout.addWidget(self.yVar, 2, 1, 1, 1)
plotLayout.addWidget(plotTitleLabel, 3, 0, 1, 1)
plotLayout.addWidget(plotTitle, 3, 1, 1, 1)
plotLayout.addWidget(calcStats, 4, 0, 1, 1)
plotLayout.addWidget(previewPlot, 5, 0, 1, 1)
# plotLayout.addWidget(self.calcStats, 5, 0, 1, 2)
# self.summary_layout_make(1, 'Pulloff_Area', 'Adhesion_Force',
# 'Detachment Speed', gridLayout, 2)
# self.summary_layout_make(2, 'Pulloff_Area', 'Adhesion_Force',
# 'Adhesion_Preload', gridLayout, 3)
# self.summary_layout_make(3, 'Pulloff_Area', 'Adhesion_Force',
# 'Contact_Time', gridLayout, 4)
# self.summary_layout_make(4, 'Pulloff_Area', 'Adhesion_Force',
# 'Sliding_Step', gridLayout, 5)
plotLayout.addWidget(groupVarLabel, 0, 2, 1, 2, alignment = Qt.AlignCenter)
plotLayout.addWidget(colorLabel, 1, 2, 1, 1)
plotLayout.addWidget(self.colorVar, 1, 3, 1, 1)
plotLayout.addWidget(columnLabel, 2, 2, 1, 1)
plotLayout.addWidget(self.columnVar, 2, 3, 1, 1)
plotLayout.addWidget(rowLabel, 3, 2, 1, 1)
plotLayout.addWidget(self.rowVar, 3, 3, 1, 1)
plotLayout.addWidget(styleLabel, 4, 2, 1, 1)
plotLayout.addWidget(self.styleVar, 4, 3, 1, 1)
plotLayout.addWidget(sizeLabel, 5, 2, 1, 1)
plotLayout.addWidget(self.sizeVar, 5, 3, 1, 1)
plotFormatGroupBox = QGroupBox("Plot Format")
plotFormatGroupBox.setStyleSheet("QGroupBox { font-weight: bold; } ")
plotFormatLayout = QGridLayout()
plotFormatGroupBox.setLayout(plotFormatLayout)
plotFormatLayout.addWidget(contextLabel, 0, 0, 1, 1)
plotFormatLayout.addWidget(context, 0, 1, 1, 1)
plotFormatLayout.addWidget(plotStyleLabel, 1, 0, 1, 1)
plotFormatLayout.addWidget(plotStyle, 1, 1, 1, 1)
plotFormatLayout.addWidget(colorPaletteLabel, 2, 0, 1, 1)
plotFormatLayout.addWidget(colorPalette, 2, 1, 1, 1)
plotFormatLayout.addWidget(fontScaleLabel, 3, 0, 1, 1)
plotFormatLayout.addWidget(fontScale, 3, 1, 1, 1)
plotFormatLayout.addWidget(rotateXLabel, 4, 0, 1, 1)
plotFormatLayout.addWidget(rotateX, 4, 1, 1, 1)
plotFormatLayout.addWidget(datetimeLabel, 5, 0, 1, 1)
plotFormatLayout.addWidget(datetimeFormat, 5, 1, 1, 1)
plotFormatLayout.addWidget(formatLabel, 6, 0, 1, 1)
plotFormatLayout.addWidget(self.saveFormat, 6, 1, 1, 1)
plotFormatLayout.addWidget(self.showMarkers, 0, 2, 1, 1)
plotFormatLayout.addWidget(zeroLine, 1, 2, 1, 1)
plotFormatLayout.addWidget(applyDespine, 0, 3, 1, 1)
plotFormatLayout.addWidget(legendSettings, 2, 2, 1, 2, alignment = Qt.AlignCenter)
plotFormatLayout.addWidget(legendPos, 3, 2, 1, 1)
plotFormatLayout.addWidget(legendLocLabel, 4, 2, 1, 1)
plotFormatLayout.addWidget(legendLoc, 4, 3, 1, 1)
plotFormatLayout.addWidget(legendColLabel, 5, 2, 1, 1)
plotFormatLayout.addWidget(legendCol, 5, 3, 1, 1)
buttonGroupBox = QGroupBox()
buttonGroupBox.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
buttonLayout= QGridLayout()
buttonGroupBox.setLayout(buttonLayout)
buttonLayout.addWidget(self.showPlot, 0, 0, 1, 1)
buttonLayout.addWidget(self.savePlot, 0, 1, 1, 1)
buttonLayout.addWidget(self.statsButton, 0, 2, 1, 1)
buttonLayout.addWidget(closeButton, 0, 3, 1, 1)
self.layout.addWidget(importGroupBox, 0, 0, 1, 1)
self.layout.addWidget(transformGroupBox, 0, 1, 1, 1)
self.layout.addWidget(plotGroupBox, 1, 0, 1, 2)
self.layout.addWidget(plotFormatGroupBox, 2, 0, 1, 2)
self.layout.addWidget(buttonGroupBox, 3, 0, 1, 2)
self.setLayout(self.layout)
## self.sumDialog.show()
# def summary_layout_make(self, plotnum, x_init, y_init, cb_init,
# layout, vpos):
# varlist = ["Adhesion_Force", "Adhesion_Preload", "Friction_Force",
# "Friction_Preload", "Max_Area", "Pulloff_Area",
# "Friction_Area", "ROI_Max_Area", "ROI_Pulloff_Area",
# "Max_Length", "Pulloff_Length", "ROI_Max_Length",
# "ROI_Pulloff_Length", "Pulloff_Contact_Number",
# "Residue_Area", "Pulloff_Median_Eccentricity", "ROI Label",
# "Measurement_Number", "Contact_Time", "Detachment Speed",
# "Attachment Speed", "Sliding Speed", "Sliding_Step", "Slope",
# "Adhesion_Stress", "Friction_Stress",
# "Normalized_Adhesion_Force", "Beam_Spring_Constant",
# "Initial_Deformation","Pulloff_Deformation","Adhesion_Energy",
# "Max_Bounding_Area", "Max_Bounding_Perimeter",
# "Max_Bounding_Length", "Max_Bounding_Width",
# "Normalized_Adhesion_Energy", "Date_of_Experiment"]
# varlist.sort()
# plotLabel = QLabel(str(plotnum), self.sumDialog)
# self.update_summary_dict('plot num', plotnum, plotnum-1)
# xVar = QComboBox(self.sumDialog) #x variable
# xVar.addItems(varlist)
# xVar.currentIndexChanged.connect(lambda: self.update_summary_dict('x var',
# xVar.currentText(),
# plotnum-1))
# xVar.setCurrentIndex(xVar.findText(x_init))
# self.update_summary_dict('x var', xVar.currentText(), plotnum-1)
# yVar = QComboBox(self.sumDialog) #y variable
# yVar.addItems(varlist)
# yVar.currentIndexChanged.connect(lambda: self.update_summary_dict('y var',
# yVar.currentText(),
# plotnum-1))
# yVar.setCurrentIndex(yVar.findText(y_init))
# self.update_summary_dict('y var', yVar.currentText(), plotnum-1)
# colorbarVar = QComboBox(self.sumDialog) #colorbar variable
# colorbarVar.addItems(varlist)
# colorbarVar.currentIndexChanged.connect(lambda: self.update_summary_dict('cbar var',
# colorbarVar.currentText(),
# plotnum-1))
# colorbarVar.setCurrentIndex(colorbarVar.findText(cb_init))
# self.update_summary_dict('cbar var', colorbarVar.currentText(), plotnum-1)
# polyfit = QCheckBox(self.sumDialog) #polynomial fit
# polyfit.stateChanged.connect(lambda: self.update_summary_dict('fit',
# polyfit.isChecked(),
# plotnum-1))
# self.update_summary_dict('fit', polyfit.isChecked(), plotnum-1)
# polyorder = QSpinBox(self.sumDialog) #polynomial order
# polyorder.valueChanged.connect(lambda: self.update_summary_dict('order',
# polyorder.value(),
# plotnum-1))
# polyorder.setRange(1, 10)
# polyorder.setValue(1)
# self.update_summary_dict('order', polyorder.value(), plotnum-1)
# layout.addWidget(plotLabel, vpos, 0, 1, 1)
# layout.addWidget(xVar, vpos, 1, 1, 1)
# layout.addWidget(yVar, vpos, 2, 1, 1)
# layout.addWidget(colorbarVar, vpos, 3, 1, 1)
# layout.addWidget(polyfit, vpos, 4, 1, 1, alignment = Qt.AlignCenter)
# layout.addWidget(polyorder, vpos, 5, 1, 1)
# def combine_toggled(self, source, importButton):
# ## groupVar.setEnabled(combine.isChecked())
# oktext = "Select summary file.." if source.currentText() == False \
# else "Select experiment list.."
# importButton.setText(oktext)
#update list of variables in dropdown
def update_dropdown_params(self):
# self.varlist = list(map(str,self.datadf_filtered.columns))
self.varlist = list(map(str,self.dataTransformList[-1].columns))
self.varlist.sort()
var_list = ['None'] + self.varlist
#combobox widgets
combobox_wids = [self.xVar, self.yVar, self.colorVar, self.columnVar,
self.rowVar, self.styleVar, self.sizeVar]
for wid in combobox_wids:
if var_list != [wid.itemText(i) for i in range(wid.count())]:
wid.clear()
wid.addItems(var_list)
list_wids = [self.pivotVars, self.varListWid, self.meltVars] #list widgets
for wid in list_wids:
if self.varlist != [wid.item(i).text() for i in range(wid.count())]:
wid.clear()
wid.addItems(self.varlist)
self.transformSteps.clear()
self.transformSteps.addItems(self.transformList)
# self.xVar.addItems(['None'] + self.varlist)
# self.yVar.addItems(['None'] + self.varlist)
# self.colorVar.addItems(['None'] + self.varlist)
# self.columnVar.addItems(['None'] + self.varlist)
# self.rowVar.addItems(['None'] + self.varlist)
# self.styleVar.addItems(['None'] + self.varlist)
# self.sizeVar.addItems(['None'] + self.varlist)
    #update transform lists on step item delete
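    # Step items are stored as "<index>:<description>" strings (e.g. "0:Raw data", with the
    # later descriptions being hypothetical here); after an item is deleted the remaining
    # indices are compacted and rewritten by the method below.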
def update_transform(self):
logging.debug('deleted')
stepnum = [int(self.transformSteps.item(i).text().split(':')[0]) \
for i in range(self.transformSteps.count())]
list_len = len(self.transformList)
j = 0
for i in range(list_len):
if i not in stepnum:
if i != 0:
del self.transformList[i-j]
del self.dataTransformList[i-j]
j += 1
else: #dont delete raw data
self.transformSteps.insertItem(0, '0:Raw data')
#reset number order
for i in range(self.transformSteps.count()):
text = self.transformSteps.item(i).text()
self.transformList[i] = str(i) + ':' + text.split(':')[1]
self.transformSteps.item(i).setText(self.transformList[i])
# disable/enable relevant combo boxes
def plot_type_changed(self, plot_type):
if plot_type in self.rel_types:
self.styleVar.setEnabled(True)
self.sizeVar.setEnabled(True)
self.showMarkers.setEnabled(True)
elif plot_type in self.cat_types:
self.styleVar.setEnabled(False)
self.sizeVar.setEnabled(False)
self.showMarkers.setEnabled(False)
# def update_summary_dict(self, key, value, plotnum):
# self.summaryDict[key][plotnum] = value
def import_data(self, source): #import summary data
## self.sumDialog.reject()
## legend_parameter = self.sumlistwidget.currentItem().text()
self.reset_summary()
self.summary = SummaryAnal()
if source.currentText() == 'Filelist sheet':
self.filepath, _ = QFileDialog.getOpenFileName(caption =
"Select experiment list file")
if self.filepath != "":
self.dataTransformList = []
self.transformList = []
self.folderpath = os.path.dirname(self.filepath)
self.datadf = self.summary.combineSummaryFromList(list_filepath = self.filepath,
subfolder = self.subfolder.toPlainText(),
data_format = self.paramDict['Data format'].currentText(),
delimiter = self.paramDict['Delimiter'].currentText(),
header_line = self.paramDict['Header line'].value())
# if self.summary.list_filepath != "":
# self.comb = True
# self.statusBar.showMessage("Summary Data combined!")
# self.datadf_filtered = self.summary.filter_df(self.datadf,
# self.filter_dict)
self.dataTransformList.append(self.datadf)
stepnum = str(len(self.transformList))
self.transformList.append(stepnum +':Raw data')
self.update_dropdown_params()
self.showPlot.setEnabled(True)
self.savePlot.setEnabled(False)
self.statsButton.setEnabled(True)
self.filenameLabel.setText(self.filepath)
# self.summary.plotSummary(self.summaryDict,
# self.summary.df_final,
# self.summary.df_final,
# legend_parameter)
# self.summary.showSummaryPlot()
# else:
# # self.statusBar.showMessage("No file selected")
# # self.comb = False
# self.summary = None
elif source.currentText() == 'File':
# self.comb = False
self.filepath, _ = QFileDialog.getOpenFileName(caption =
"Select summary data file")
if self.filepath != "":
self.dataTransformList = []
self.transformList = []
self.folderpath = os.path.dirname(self.filepath)
self.datadf = self.summary.importSummary(filepath = self.filepath,
data_format = self.paramDict['Data format'].currentText(),
delimiter = self.paramDict['Delimiter'].currentText(),
header_line = self.paramDict['Header line'].value())
# self.create_var('test var [mPa^2]', '''['Pulloff Force']/['Pulloff Area']''') #CHECK
# if self.summary.summary_filepath != "":
# self.datadf_filtered = self.summary.filter_df(self.datadf,
# self.filter_dict)
self.dataTransformList.append(self.datadf)
stepnum = str(len(self.transformList))
self.transformList.append(stepnum +':Raw data')
self.update_dropdown_params()
self.showPlot.setEnabled(True)
self.savePlot.setEnabled(False)
self.statsButton.setEnabled(True)
self.filenameLabel.setText(self.filepath)
# self.summary.plotSummary(self.summaryDict,
# self.summary.df_final,
# self.summary.df_final,
# legend_parameter)
# self.summary.showSummaryPlot()
# else:
# self.summary = None
elif source.currentText() == 'Folder':
self.folderpath = QFileDialog.getExistingDirectory(caption =
"Select folder")
if self.folderpath != "":
self.dataTransformList = []
self.transformList = []
self.datadf = self.summary.combineSummaryFromFolder(folderpath = self.folderpath,
subfolder = self.subfolder.toPlainText(),
data_format = self.paramDict['Data format'].currentText(),
delimiter = self.paramDict['Delimiter'].currentText(),
header_line = self.paramDict['Header line'].value())
# if self.summary.summary_filepath != "":
# self.datadf_filtered = self.summary.filter_df(self.datadf,
# self.filter_dict)
self.dataTransformList.append(self.datadf)
stepnum = str(len(self.transformList))
self.transformList.append(stepnum +':Raw data')
self.update_dropdown_params()
self.showPlot.setEnabled(True)
self.savePlot.setEnabled(False)
self.statsButton.setEnabled(True)
self.filenameLabel.setText(self.folderpath)
# self.summary.plotSummary(self.summaryDict,
# self.summary.df_final,
# self.summary.df_final,
# legend_parameter)
# self.summary.showSummaryPlot()
# else:
# self.summary = None
def export_summary_plots(self): #export summary plots
# if self.comb == False and self.summary == None:
# self.reset_summary()
# self.summary = SummaryAnal()
# self.summary.importSummary()
# if self.summary.summary_filepath != "":
# # self.summary.filter_df(self.filter_dict)
# # self.summary.plotSummary(self.summaryDict,
# # self.summary.df_final,
# # self.summary.df_final)
# self.summary.plotSummary()
# else:
# self.summary = None
#save summary plots in separate thread
if self.summary != None:
# saveSummPlotThread = SummPlotThread(self.summary,
# self.summaryDict['format'][0])
# saveSummPlotThread.output.connect(self.process_indicate)
# saveSummPlotThread.finished.connect(self.save_plot_indicate)
# saveSummPlotThread.start()
# self.summary.plotSummary(self.datadf_filtered, self.paramDict)
self.summary.saveSummaryPlot(self.folderpath,
self.saveFormat.currentText())
#export data to excel
# self.datadf_filtered.to_excel(self.folderpath +
# '/summary_data_' + self.paramDict['Title'].text() +
# '-' + time.strftime("%y%m%d%H%M%S") + '.xlsx')
#export data and stats to excel
filepath = self.folderpath + '/summary_data_' + \
self.paramDict['Title'].text() + '-' + \
time.strftime("%y%m%d%H%M%S") + '.xlsx'
            writer = pd.ExcelWriter(filepath, engine='xlsxwriter')
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
        assert_frame_equal(dropped, df)
import json as json
import mysql.connector as sql
import numpy as np
import pandas as pd
import sys
from flask import Flask, Response, request
import dbconfig as cfg
# globals
HOST = 'mysql'
DATABASE = 'home1'
# THRESHOLD for SPEED
SOGLIA = 50
FLOAT_FORMAT_STRING = "{0:.2f}"
app = Flask(__name__)
def format_float(value):
return float(FLOAT_FORMAT_STRING.format(value))
#
# list of the trips
#
@app.route('/car-api/trips')
def find_trips():
db_connection = sql.connect(
host=HOST, database=DATABASE, user=cfg.mysql['user'], password=cfg.mysql['password'])
SQL_SELECT = 'SELECT id, dayhour, start_id, stop_id FROM trips ORDER BY id ASC'
# load data in Dataframe
dfTrips = pd.read_sql(SQL_SELECT, con=db_connection)
# vector for the list of trips
vet = []
for index, row in dfTrips.iterrows():
# single trip
obj = {}
obj['ID'] = row['id']
obj['DAYHOUR'] = row['dayhour']
obj['START_ID'] = row['start_id']
obj['STOP_ID'] = row['stop_id']
vet.append(obj)
# build the response object
res = {}
res['TRIPS'] = vet
msg_json = json.dumps(res, sort_keys=True)
resp = Response(msg_json, status=200, mimetype='application/json')
return resp
#
# calculate TRIP summary data (consumption, distance, speed_over_threshold)
#
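# Example request (parameter values taken from the commented defaults inside the view):
#   GET /car-api/trip/findByDayHour?DAYHOUR=27-01-2018%2008&CARID=googx1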
@app.route('/car-api/trip/findByDayHour')
def calcola_trip():
try:
db_connection = sql.connect(
host=HOST, database=DATABASE, user=cfg.mysql['user'], password=cfg.mysql['password'])
# prepare query
# DAYHOUR = "27-01-2018 08"
        # TODO: should check for presence of params...
DAYHOUR = request.args['DAYHOUR']
# CARID=googx1
CARID = request.args['CARID']
# identify TRIP from DAYHOUR and CARID
SQL_SELECT = 'SELECT id, start_id, stop_id FROM trips WHERE dayhour = "' + DAYHOUR + '"'
dfTrips = pd.read_sql(SQL_SELECT, con=db_connection)
ID_TRIP = dfTrips['id'][0]
START_ID = dfTrips['start_id'][0]
STOP_ID = dfTrips['stop_id'][0]
# READ msgs from TRIP into DataFrame
SQL_SELECT = 'SELECT msg FROM obd2_msg WHERE msg_type = "OBD2" and id > ' + str(START_ID) + ' and id < ' + \
str(STOP_ID) + ' and carid = "' + CARID + '" order by id ASC'
        dfMesg = pd.read_sql(SQL_SELECT, con=db_connection)
import os
import json
import datetime
import multiprocessing
import random
import copy
import time
import warnings
import pandas as pd
import numpy as np
from datetime import date, timedelta
from pathlib import Path
from models.common.mit_buildings import MITBuildings
from models.common.to_precision import sig_fig_formatted
from analyses.common.analysis import Analysis, add_empty_buildings, sort_dict
def read_row(pair_person_dates):
'''
    function outputs a dict of three pandas DataFrames (building stays, campus arrivals and
    campus departures) for one sampled person. these per-person frames are later concatenated
    by a multiprocessing pool, which speeds up converting the json samples into pandas dataframes.
    input is a list [person, dates, id] where:
    person: list (one entry per simulated day) of trajectory samples for a single person
    (i.e. one element of input_samples['trajectory']['samples'])
    dates: list of size <num. of days n_days_to_simulate> with the dates of the samples
    (i.e. input_samples['trajectory']['dates'])
    id: integer index identifying the sampled person
'''
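    # Hypothetical example of the structure this helper expects (keys as used below,
    # values made up purely for illustration):
    #   person = [[{'stay_type': 'on_campus_inside', 'building': 'E17',
    #               'start_time': '09:00:00', 'end_time': '17:30:00'},
    #              {'stay_type': 'commute', 'commute_type': 'walk',
    #               'departure_time': '17:45:00'}],
    #             []]                                   # second day: stayed home
    #   dates  = ['2020-06-01', '2020-06-02']
    #   read_row([person, dates, 0]) -> dict with the three per-person DataFrames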
building_stays_local = pd.DataFrame()
coming_into_campus_local = pd.DataFrame()
leaving_campus_local = pd.DataFrame()
person = pair_person_dates[0]
dates = pair_person_dates[1]
id = pair_person_dates[2]
for i, day_sample in enumerate(person):
if len(day_sample) == 0:
continue # sampled person did not go to work
else:
for stay in day_sample:
if stay['stay_type'] == 'on_campus_inside':
building_stays_local = building_stays_local.append(
{
'person_id': id,
'building': stay['building'],
'date': dates[i],
'start_time': dates[i] + ' ' + stay['start_time'],
'end_time': dates[i] + ' ' + stay['end_time']
}, ignore_index=True)
# getting arrival time into campus to calculate campus level inflow down the line
elif stay['stay_type'] == 'commute' and 'arrival_time' in stay.keys():
coming_into_campus_local = coming_into_campus_local.append(
{
'person_id': id,
'commute_type': stay['commute_type'],
'date': dates[i],
'arrival_time': dates[i] + ' ' + stay['arrival_time']
}, ignore_index=True)
# getting arrival time into campus to calculate campus level inflow down the line
elif stay['stay_type'] == 'commute' and 'departure_time' in stay.keys():
leaving_campus_local = leaving_campus_local.append(
{
'person_id': id,
'commute_type': stay['commute_type'],
'date': dates[i],
'departure_time': dates[i] + ' ' + stay['departure_time']
}, ignore_index=True)
return {'building_stays_local': building_stays_local,
'coming_into_campus_local': coming_into_campus_local,
'leaving_campus_local': leaving_campus_local
}
class ScenarioCampusStatistics(Analysis):
def run(self, input_samples: dict, input_analyses: dict, uuid_prefix: str) -> dict:
start_global = time.time()
np.random.seed(self.analysis_parameters['random_seed'])
if 'unittest_mode' not in self.analysis_parameters:
self.unittest_mode = False
else:
self.unittest_mode = self.analysis_parameters['unittest_mode']
self.n_bootstraps = self.analysis_parameters['n_bootstraps']
self.percentiles_list = self.analysis_parameters['percentiles']
dates = input_samples['trajectory']['dates']
all_person_samples = input_samples['trajectory']['samples']
self.num_samples = len(all_person_samples)
self.building_data = MITBuildings()
self.total_population_size = input_samples['trajectory']['total_population_size']
cpu_count = multiprocessing.cpu_count()
print('cpu count on machine:', cpu_count)
dates_copies = [dates for copies in range(len(all_person_samples))]
person_id_list = list(range(self.num_samples))
input_to_multiprocessing = list(zip(all_person_samples, dates_copies, person_id_list))
# read_row(input_to_multiprocessing[0]) #DEBUG for multiproc
start_reading = time.time()
print('creating reading multiprocessing pool..', cpu_count)
pool = multiprocessing.Pool(cpu_count)
print('reading..')
reading_stays_list = pool.map(read_row, input_to_multiprocessing)
pool.close()
pool.join()
print('reading finished, closed pool, took', time.time() - start_reading, 'sec')
        building_stays = pd.concat([ps['building_stays_local'] for ps in reading_stays_list])
# -*- coding: utf-8 -*-
#
# @Author : <NAME>
# @Email : <EMAIL>
from typing import Dict
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from common_utils.prints import print_log_message
from recognition.training.extract_training_data import neurons2data
def lda_knn_run(neurons: Dict
):
results = list()
param_grid = {
}
for m in [(0,), (1,), (0, 1)]:
for n_com in range(1, 40):
X, y, num_ids, id_map = neurons2data(neurons.copy(), is_common_id = True, mode = m)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
# -------------------- How to use multi-estimator in grid search --------------------
pca = PCA(n_components = n_com)
pca.fit(X_train, y_train)
X_new = pca.transform(X)
grid_search = GridSearchCV(KNeighborsClassifier(),
param_grid, n_jobs = -1, verbose = 1, cv = 5)
grid_search.fit(X_new, y)
print_log_message(
f"| Mode: {m}\n n_components: {n_com}, \n best score are {grid_search.best_score_}\n best parameters are {grid_search.best_params_} |")
            results.append(pd.DataFrame(grid_search.cv_results_))
import os
import warnings
import sox
import tqdm
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import svm
from scipy.stats import uniform
import matplotlib.pyplot as plt
from models import deeptone_classifier
from sklearn.naive_bayes import GaussianNB
import sklearn.gaussian_process.kernels as kerns
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
try:
import deeptone
except Exception as e:
raise Warning(e)
plt.style.use('fivethirtyeight')
def get_audio_files(data_folder, expected_sr, remove_unexpected_sr = False):
# Find audio files
audio_files = [f for f in os.listdir(data_folder) if f.lower().endswith('.wav')]
    # Removal of unexpected sampling rates
unexptected_srs = []
for audio_file in audio_files:
if sox.file_info.sample_rate(os.path.join(data_folder,audio_file)) != expected_sr:
unexptected_srs.append(audio_file)
    if len(unexptected_srs) > 0:
        warnings.warn(f'Found files with unexpected sample rates. Removing: {remove_unexpected_sr}')
        if remove_unexpected_sr:
            audio_files = [i for i in audio_files if i not in unexptected_srs]
return audio_files
def get_metadata(audio_files, data_folder):
df_ = deeptone_classifier.create_metadata(audio_files = audio_files)
df_index = pd.read_csv(data_folder+"/Orestes_call-combo-classification.csv")
df_index["file_name"] = [f + ".wav" for f in df_index.File.values]
remove = df_index[df_index["type"]=="xx"].file_name.values
df_index_keep = df_index[df_index["type"]!="xx"]
df_index_keep["type"] = ["phc" if x == "climax" else "phi" for x in df_index_keep.type]
df_ = df_[~df_["file_name"].isin(remove)]
df_.loc[df_.file_name.isin(df_index_keep.file_name).values, ['call_type']] = df_index_keep['type'].values
# Subset data by removing pant grunt calls and modify labelling of combinations calls
dfi_keep = df_
dfi_keep = dfi_keep.iloc[np.nonzero(dfi_keep.call_type.values != 'pg')[0],:]
dfi_keep.loc[:,"call_type"] = ["Combination" if "-" in f else f for f in dfi_keep.call_type.values]
try:
os.system(f'mkdir {data_folder}/keep')
except Exception as e:
print(e)
[os.system(f'cp {data_folder}/{f} {data_folder}/keep/{f}') for f in dfi_keep.file_name.values]
dfi_keep['call_type'] = dfi_keep['call_type'].str.replace('phi', "Pant-hoot-intro")
dfi_keep['call_type'] = dfi_keep['call_type'].str.replace('phc', "Pant-hoot-climax")
return dfi_keep
def analysis(data_folder, results_folder, front_end_dictionary, model_dictionary, C_choice_list, train_num_list, active_param_sampling, label, nested_labels):
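    # Rough, hypothetical sketch of the expected inputs (keys inferred only from their
    # use further down; the concrete names are not taken from the project):
    #   front_end_dictionary = {'deeptone_frontend': {'model_type': 'Identity'}, ...}
    #   model_dictionary     = {'SVM': {...}, ...}  # each entry gets a 'search_parameters' dict below
    #   C_choice_list holds the candidate regularisation values iterated over below.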
try:
os.mkdir(results_folder)
except Exception as e:
print(e)
root_seed = 1
audio_files = get_audio_files(data_folder, expected_sr = 16000, remove_unexpected_sr = True)
if len(audio_files)==0:
raise EnvironmentError("No audio files found")
with_plots = False
reportz = pd.DataFrame()
for name in list(front_end_dictionary.keys()):
front_end = front_end_dictionary[name]
# Create metadata from list of target audio files (information enclosed in filename)
dfi_keep = get_metadata(audio_files, data_folder)
# Get raw embeddings for each audio file and save as dictionary
dfs = deeptone_classifier.get_embeddings(df = dfi_keep,
data_folder = data_folder,
normalise = True,
average= False,
expected_sr = 16000,
target_model = front_end["model_type"])
# Transform the embeddings space by averaging over time
embeddings = deeptone_classifier.transform_embeddings_space(dfs = dfs, average = True)
embeddings.reset_index(drop=True, inplace=True)
dfi_keep.reset_index(drop=True, inplace=True)
dfi_keep = embeddings.merge(dfi_keep)
df_pre_scores_all = pd.DataFrame(columns=["y_test_a","y_pred_a","iteration_id","classifier_id","call_type"])
for C_choice in C_choice_list:
for key in list(model_dictionary.keys()):
model_dictionary[key]['search_parameters'] = dict()
if active_param_sampling:
param_sample = ["C",uniform(loc=C_choice+0.001,scale=(C_choice+0.001)*10)]
else:
param_sample = None
                df_pre_scores = pd.DataFrame()
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [
r'\usepackage{amsmath}',
r'\usepackage{amssymb}'
]
lw = 4
n_samples = 1000
N = np.logspace(1, 3, 20, dtype=int)
OUTPUT_DIR = pathlib.Path('outputs_plots')
filename = OUTPUT_DIR / 'comparison_AL_A.pkl'
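# The loop below estimates quantiles of ||A @ L||_2^2 / ||A||_2^2 over random Gaussian
# matrices A, where L = np.tri(n) is the lower-triangular all-ones matrix, and records two
# analytic reference curves ('b' and 'lb'); if the pickle above already exists it is reused.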
if filename.exists():
df = pd.read_pickle(filename)
else:
curve = []
for n in N:
print(f"\rSize {n} :", end='', flush=True)
a, e = [], []
L = np.tri(n)
for i in range(n_samples):
print(f"{i / n_samples:7.2%}" + "\b" * 7, end='', flush=True)
A = np.random.randn(n, n)
e.append(np.linalg.norm(A, ord=2) ** 2)
a.append(np.linalg.norm(A.dot(L), ord=2) ** 2)
r = np.array(a)/e
curve.append({
'n': n,
'mean': r.mean(),
'median': np.median(r),
'q1': np.quantile(r, q=.1),
'q9': np.quantile(r, q=.9),
'b': 1 / (32 * np.cos(n*np.pi/(2*n+1)) ** 2),
'lb': (n+1/2) / np.pi ** 2
})
    df = pd.DataFrame(curve)
import pandas as pd
import os, re, fnmatch, subprocess
from collections import defaultdict
from shlex import quote
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.igfdb.igfTables import Experiment, Run
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.experimentadaptor import ExperimentAdaptor
from igf_data.igfdb.runadaptor import RunAdaptor
from igf_data.igfdb.collectionadaptor import CollectionAdaptor
from igf_data.igfdb.fileadaptor import FileAdaptor
from igf_data.utils.fileutils import calculate_file_checksum
class Collect_seqrun_fastq_to_db:
'''
A class for collecting raw fastq files after demultiplexing and storing them in database.
Additionally this will also create relevant entries for the experiment and run tables in database
:param fastq_dir: A directory path for file look up
:param model_name: Sequencing platform information
:param seqrun_igf_id: Sequencing run name
:param session_class: A database session class
:param flowcell_id: Flowcell information for the run
:param samplesheet_file: Samplesheet filepath
:param samplesheet_filename: Name of the samplesheet file, default SampleSheet.csv
:param collection_type: Collection type information for new fastq files, default demultiplexed_fastq
:param file_location: Fastq file location information, default HPC_PROJECT
:param collection_table: Collection table information for fastq files, default run
:param manifest_name: Name of the file manifest file, default file_manifest.csv
:param singlecell_tag: Samplesheet description for singlecell samples, default 10X
'''
def __init__(
self,fastq_dir,model_name,seqrun_igf_id,session_class,flowcell_id,
samplesheet_file=None,samplesheet_filename='SampleSheet.csv',
collection_type='demultiplexed_fastq',file_location='HPC_PROJECT',
collection_table='run', manifest_name='file_manifest.csv',singlecell_tag='10X'):
self.fastq_dir = fastq_dir
self.samplesheet_file = samplesheet_file
self.samplesheet_filename = samplesheet_filename
self.seqrun_igf_id = seqrun_igf_id
self.model_name = model_name
self.session_class = session_class
self.collection_type = collection_type
self.file_location = file_location
self.flowcell_id = flowcell_id
self.collection_table = collection_table
self.manifest_name = manifest_name
self.singlecell_tag = singlecell_tag
def find_fastq_and_build_db_collection(self):
'''
A method for finding fastq files and samplesheet under a run directory
and loading the new files to db with their experiment and run information
    It calculates the following entries
* library_name
Same as sample_id unless mentioned in 'Description' field of samplesheet
* experiment_igf_id
library_name combined with the platform name
        same library sequenced on a different platform will be added as a separate experiment
* run_igf_id
experiment_igf_id combined with sequencing flowcell_id and lane_id
collection name: Same as run_igf_id, fastq files will be added to db collection
using this id
* collection type
        Default type for fastq file collections is 'demultiplexed_fastq'
* file_location
Default value is 'HPC_PROJECT'
'''
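    # Worked example of the id composition described above (all values hypothetical):
    #   sample_igf_id='IGF0001', platform='HISEQ4000', flowcell_id='HABCDE', lane='1'
    #   library_name      -> 'IGF0001'
    #   experiment_igf_id -> 'IGF0001_HISEQ4000'
    #   run_igf_id        -> 'IGF0001_HISEQ4000_HABCDE_1'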
try:
fastq_files_list = \
self._collect_fastq_and_sample_info()
self._build_and_store_exp_run_and_collection_in_db(
fastq_files_list=fastq_files_list)
except Exception as e:
raise ValueError(
'Failed to find fastq and build collection, error: {0}'.\
format(e))
def _get_fastq_and_samplesheet(self):
try:
fastq_dir = self.fastq_dir
samplesheet_file = self.samplesheet_file
samplesheet_filename = self.samplesheet_filename
r1_fastq_regex = \
re.compile(r'\S+_R1_\d+\.fastq(\.gz)?', re.IGNORECASE)
r2_fastq_regex = \
re.compile(r'\S+_R2_\d+\.fastq(\.gz)?', re.IGNORECASE)
samplesheet_list = list()
r1_fastq_list = list()
r2_fastq_list = list()
if os.path.isdir(fastq_dir):
for root, _, files in os.walk(top=fastq_dir):
if samplesheet_filename in files:
samplesheet_list.append(
os.path.join(root,samplesheet_filename))
for file in files:
if not fnmatch.fnmatch(file, 'Undetermined_'):
if r1_fastq_regex.match(file):
r1_fastq_list.\
append(os.path.join(root,file))
elif r2_fastq_regex.match(file):
r2_fastq_list.\
append(os.path.join(root,file))
if len(r2_fastq_list) > 0 and \
len(r1_fastq_list) != len(r2_fastq_list):
raise ValueError(
            'Mismatched fastq file counts: R1 {0} and R2 {1}'.format(
len(r1_fastq_list),
len(r2_fastq_list)))
if samplesheet_file is None and \
len(samplesheet_list)==1:
self.samplesheet_file = samplesheet_list[0] # set samplesheet file name
if len(samplesheet_list) > 1:
raise ValueError(
'Found more than one samplesheet file for fastq dir {0}'.\
format(fastq_dir))
if samplesheet_file is None and \
len(samplesheet_list)==0:
raise ValueError(
'No samplesheet file for fastq dir {0}'.\
format(fastq_dir))
elif os.path.isfile(fastq_dir):
if samplesheet_file is None:
raise ValueError(
'Missing samplesheet file for fastq file {0}'.\
format(fastq_dir))
        file = os.path.basename(fastq_dir)  # fastq_dir points to a single fastq file here
        root = os.path.dirname(fastq_dir)
        if not fnmatch.fnmatch(file, 'Undetermined_'):
if r1_fastq_regex.match(file):
r1_fastq_list.\
append(os.path.join(root,file))
elif r2_fastq_regex.match(file):
r2_fastq_list.\
append(os.path.join(root,file))
return r1_fastq_list, r2_fastq_list
except Exception as e:
raise ValueError(
'Failed to get fastq and samplesheet, error: {0}'.\
format(e))
@staticmethod
def _link_fastq_file_to_sample(sample_name,r1_fastq_list, r2_fastq_list):
try:
sample_files = \
defaultdict(lambda: defaultdict(lambda: defaultdict()))
r1_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R1_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file1 in r1_fastq_list:
if r1_regex.match(os.path.basename(file1)):
m = r1_regex.match(os.path.basename(file1))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R1'] = file1
if len(r2_fastq_list) > 0:
r2_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R2_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file2 in r2_fastq_list:
if r2_regex.match(os.path.basename(file2)):
m = r2_regex.match(os.path.basename(file2))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R2'] = file2
return sample_files
except Exception as e:
raise ValueError(
'Failed to link fastq to sample, error: {0}'.format(e))
def _collect_fastq_and_sample_info(self):
'''
An internal method for collecting fastq and sample info
'''
try:
seqrun_igf_id = self.seqrun_igf_id
model_name = self.model_name
flowcell_id = self.flowcell_id
(r1_fastq_list, r2_fastq_list) = \
self._get_fastq_and_samplesheet()
samplesheet_file = self.samplesheet_file
final_data = list()
samplesheet_sc = \
SampleSheet(infile=samplesheet_file) # read samplesheet for single cell check
samplesheet_sc.\
filter_sample_data(
condition_key='Description',
condition_value=self.singlecell_tag,
method='include') # keep only single cell samples
if len(samplesheet_sc._data) >0:
sc_new_data = \
pd.DataFrame(samplesheet_sc._data).\
drop(['Sample_ID','Sample_Name','index'],axis=1).\
drop_duplicates().\
to_dict(orient='records') # remove duplicate entries from single cell samplesheet
final_data.extend(sc_new_data) # add single cell entries to the final dataset
samplesheet_data = \
SampleSheet(infile=samplesheet_file)
samplesheet_data.\
filter_sample_data(
condition_key='Description',
condition_value=self.singlecell_tag,
method='exclude') # keep non single cell samples
if len(samplesheet_data._data) > 0:
final_data.\
extend(samplesheet_data._data) # add normal samples to final data
fastq_files_list = list()
for row in final_data:
description = row['Description']
if description==self.singlecell_tag: # collect required values for single cell projects
sample_name = row['Original_Sample_Name']
sample_id = row['Original_Sample_ID']
else:
sample_name = row['Sample_Name'] # collect default values for normal projects
sample_id = row['Sample_ID']
project_name = row['Sample_Project']
sample_files = \
self._link_fastq_file_to_sample(
sample_name,
r1_fastq_list,
r2_fastq_list)
for lane, lane_files in sample_files.items():
fastq_info = {
'sample_igf_id':sample_id,
'sample_name':sample_name,
'project_igf_id':project_name,
'lane_number':lane,
'seqrun_igf_id':seqrun_igf_id,
'platform_name':model_name,
'flowcell_id':flowcell_id,
'description':description }
for read_type, filepath in lane_files.items():
fastq_info.\
update({read_type:filepath}) # allowing only one file per lane per read type
fastq_files_list.\
append(fastq_info) # adding entries per sample per lane
return fastq_files_list
except Exception as e:
raise ValueError(
'Failed to collect info, error: {0}'.format(e))
@staticmethod
def _count_fastq_reads(fastq_file):
'''
A static method for counting reads from the zipped and bzipped fastq files
required params:
fastq_file: A fastq file with absolute path
'''
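    # A FASTQ record spans four lines (header, sequence, '+', quality string), which is
    # why the line count obtained below is divided by four to get the read count.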
try:
if not os.path.exists(fastq_file):
raise IOError(
'fastq file {0} is not found'.format(fastq_file))
if fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq.gz'):
read_cmd = ['zcat',quote(fastq_file)]
elif fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq.bz'):
read_cmd = ['bzcat',quote(fastq_file)]
elif fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq'):
read_cmd = ['cat',quote(fastq_file)]
else:
raise ValueError(
'file {0} is not recognised'.format(fastq_file))
proc = \
subprocess.\
Popen(
read_cmd,
stdout=subprocess.PIPE)
count_cmd = ['wc','-l']
proc2 = \
subprocess.\
Popen(
count_cmd,
stdin=proc.stdout,
stdout=subprocess.PIPE)
proc.stdout.close()
result = \
int(proc2.communicate()[0].decode('UTF-8'))
if result==0:
raise ValueError(
'Fastq file {0} has zero lines'.format(fastq_file))
result = int(result/4)
return result
except Exception as e:
raise ValueError(
'Failed to count fastq reads, error: {0}'.format(e))
def _calculate_experiment_run_and_file_info(self,data,restricted_list):
try:
if not isinstance(data, pd.Series):
data = pd.Series(data)
# set library id
library_id = data.sample_igf_id
      # calculate experiment id
experiment_id = \
'{0}_{1}'.format(library_id,data.platform_name)
data['library_name'] = library_id
data['experiment_igf_id'] = experiment_id
# calculate run id
run_igf_id = \
'{0}_{1}_{2}'.format(
experiment_id,
data.flowcell_id,
data.lane_number)
data['run_igf_id'] = run_igf_id
# set collection name and type
data['name'] = run_igf_id
data['type'] = self.collection_type
data['table'] = self.collection_table
data['location'] = self.file_location
# set file md5 and size
if 'R1' in data:
data['R1_md5'] = \
calculate_file_checksum(
filepath=data.R1,
hasher='md5')
data['R1_size'] = \
os.path.getsize(data.R1)
data['R1_READ_COUNT'] = \
self._count_fastq_reads(
fastq_file=data.R1)
if 'R2' in data:
data['R2_md5'] = \
calculate_file_checksum(
filepath=data.R2,
hasher='md5')
data['R2_size'] = \
os.path.getsize(data.R2)
data['R2_READ_COUNT'] = \
self._count_fastq_reads(
fastq_file=data.R2)
# set library strategy
library_layout = 'SINGLE'
if 'R1' in data and 'R2' in data and \
data.R1 is not None and data.R2 is not None:
library_layout='PAIRED'
data['library_layout'] = library_layout
return data
except Exception as e:
raise ValueError(
'Failed to calculate exp, run and file, error: {0}'.\
format(e))
@staticmethod
def _reformat_file_group_data(data):
try:
if isinstance(data, pd.DataFrame):
data = data.to_dict(orient='records')
if not isinstance(data,list):
raise ValueError(
'Expecting list got {0}'.format(type(data)))
reformatted_file_group_data = list()
reformatted_file_data = list()
for row in data:
collection_name = None
collection_type = None
file_location = None
if 'name' in row.keys():
collection_name = row['name']
if 'type' in row.keys():
collection_type = row['type']
if 'location' in row.keys():
file_location = row['location']
if 'R1' in row.keys():
r1_file_path = row['R1']
r1_file_size = \
row['R1_size'] if 'R1_size' in row.keys() else None
r1_file_md5 = \
row['R1_md5'] if 'R1_md5' in row.keys() else None
reformatted_file_data.\
append({
'file_path':r1_file_path,
'md5':r1_file_md5,
'location':file_location,
'size':r1_file_size})
reformatted_file_group_data.\
append({
'name':collection_name,
'type':collection_type,
'file_path':r1_file_path})
if 'R2' in row.keys():
r2_file_path = row['R2']
r2_file_size = \
row['R2_size'] if 'R2_size' in row.keys() else None
r2_file_md5 = \
row['R2_md5'] if 'R2_md5' in row.keys() else None
reformatted_file_data.\
append({
'file_path':r2_file_path,
'md5':r2_file_md5,
'location':file_location,
'size':r2_file_size})
reformatted_file_group_data.\
append({
'name':collection_name,
'type':collection_type,
'file_path':r2_file_path})
file_data = \
pd.DataFrame(reformatted_file_data)
file_data = file_data.dropna() # removing rows witn None values
file_group_data = \
pd.DataFrame(reformatted_file_group_data)
file_group_data = \
file_group_data.dropna() # removing rows with None values
return file_data, file_group_data
except Exception as e:
raise ValueError(
'Failed to reformat file group data, error: {0}'.\
format(e))
def _write_manifest_file(self,file_data):
'''
An internal method for writing file data to the manifest file
'''
try:
manifest_name = self.manifest_name
fastq_dir = self.fastq_dir
manifest_path = \
os.path.join(fastq_dir,manifest_name)
if os.path.exists(manifest_path):
raise ValueError(
'manifest file {0} already present'.\
format(manifest_path))
if isinstance(file_data, list):
file_data = pd.DataFrame(file_data) # convert file data to dataframe
file_data['file_path'] = \
file_data['file_path'].\
map(
lambda x: \
os.path.relpath(x, start=fastq_dir)) # replace filepath with relative path
file_data = \
file_data.drop(['location'],axis=1) # remove file location info
file_data.\
to_csv(
manifest_path,
sep='\t',
encoding='utf-8',
index=False) # write data to manifest file
except Exception as e:
raise ValueError(
'Failed to write manifest file, error: {0}'.\
format(e))
@staticmethod
def _check_existing_data(data,dbsession,table_name,check_column='EXISTS'):
try:
if not isinstance(data, pd.Series):
raise ValueError(
'Expecting a data series and got {0}'.\
format(type(data)))
if table_name=='experiment':
if 'experiment_igf_id' in data and \
not pd.isnull(data['experiment_igf_id']):
experiment_igf_id = data['experiment_igf_id']
ea = \
ExperimentAdaptor(**{'session':dbsession})
experiment_exists = \
ea.check_experiment_records_id(
experiment_igf_id)
if experiment_exists: # store data only if experiment is not existing
data[check_column] = True
else:
data[check_column] = False
return data
else:
raise ValueError(
'Missing or empty required column experiment_igf_id')
elif table_name=='run':
if 'run_igf_id' in data and \
not pd.isnull(data['run_igf_id']):
run_igf_id = data['run_igf_id']
ra = RunAdaptor(**{'session':dbsession})
run_exists = \
ra.check_run_records_igf_id(run_igf_id)
if run_exists: # store data only if run is not existing
data[check_column] = True
else:
data[check_column] = False
return data
else:
raise ValueError(
'Missing or empty required column run_igf_id')
elif table_name=='collection':
if 'name' in data and 'type' in data and \
not pd.isnull(data['name']) and \
not pd.isnull(data['type']):
ca = CollectionAdaptor(**{'session':dbsession})
collection_exists = \
ca.check_collection_records_name_and_type(
collection_name=data['name'],
collection_type=data['type'])
if collection_exists:
data[check_column] = True
else:
data[check_column] = False
return data
else:
raise ValueError(
'Missing or empty required column name or type')
else:
raise ValueError(
'table {0} not supported yet'.format(table_name))
except Exception as e:
raise ValueError(
'Failed to check existing data, error: {0}'.format(e))
def _build_and_store_exp_run_and_collection_in_db(
      self,fastq_files_list,restricted_list=('10X',)):
'''
An internal method for building db collections for the raw fastq files
'''
session_class = self.session_class
db_connected = False
try:
restricted_list = list(restricted_list)
dataframe = \
| pd.DataFrame(fastq_files_list) | pandas.DataFrame |
"""Code used for notebooks and data exploration on
https://github.com/oscovida/oscovida"""
import datetime
import math
import os
import pytz
import time
import joblib
import numpy as np
import pandas as pd
import IPython.display
from typing import Tuple, Union
# choose font - can be deactivated
from matplotlib import rcParams
from oscovida.data_sources import base_url, hungary_data, jhu_population_url, rki_data, rki_population_url, rki_population_backup_file
from oscovida.plotting_helpers import align_twinx_ticks, cut_dates, has_twin, limit_to_smoothed, uncertain_tail
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Tahoma', 'DejaVu Sans', 'Lucida Grande', 'Verdana']
rcParams['svg.fonttype'] = 'none'
# need many figures for index.ipynb and germany.ipynb
rcParams['figure.max_open_warning'] = 50
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import DateFormatter, date2num, MONDAY, WeekdayLocator
from matplotlib.ticker import ScalarFormatter, FuncFormatter, FixedLocator
from bisect import bisect
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# suppress warning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
LW = 3 # line width
# set up joblib memory to avoid re-fetching files
joblib_location = "./cachedir"
joblib_memory = joblib.Memory(joblib_location, verbose=0)
def compute_binder_link(notebook_name):
"""Given a string """
root_url = "https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/"
return root_url + notebook_name
def display_binder_link(notebook_name):
url = compute_binder_link(notebook_name)
# print(f"url is {url}")
IPython.display.display(
IPython.display.Markdown(f'[Execute this notebook with Binder]({url})'))
def clear_cache():
"""Need to run this before new data for the day is created"""
joblib_memory.clear()
def double_time_exponential(q2_div_q1, t2_minus_t1=None):
""" See https://en.wikipedia.org/wiki/Doubling_time"""
if t2_minus_t1 is None:
t2_minus_t1 = np.ones(q2_div_q1.shape)
return t2_minus_t1 * np.log(2) / np.log(q2_div_q1)
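# Editorial usage sketch (not part of the original module): the helper above
# implements t * ln(2) / ln(q2/q1). The function below is hypothetical and only
# illustrates the call.
def _example_double_time():
    growth = np.array([1.3, 1.3])            # day-over-day ratios q(t2)/q(t1)
    return double_time_exponential(growth)   # ~2.64 days to double at +30%/day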
def report_download(url, df):
print(f"Downloaded data: last data point {df.columns[-1]} from {url}")
@joblib_memory.cache
def fetch_deaths_last_execution():
"""Use to remember at what time and date the last set of deaths was downloaded.
A bit of a hack as we didn't know how to get this out of joblib.
"""
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_cases_last_execution():
"""See fetch_deaths_last_execution"""
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_deaths():
"""Download deaths from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "deaths" + "_global.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_deaths_last_execution()
return df
@joblib_memory.cache
def fetch_deaths_US():
"""Download deaths for US states from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "deaths" + "_US.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
# fetch_deaths_last_execution_()
return df
@joblib_memory.cache
def fetch_cases():
"""Download cases from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "confirmed" + "_global.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_cases_last_execution()
return df
@joblib_memory.cache
def fetch_cases_US():
"""Download cases for US status from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "confirmed" + "_US.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_cases_last_execution()
return df
def get_country_data_johns_hopkins(country: str,
region: str = None, subregion: str = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Given a country name, return deaths and cases as a tuple of
pandas time series. Works for all (?) countries in the world, or at least
those in the Johns Hopkins data set. All rows should contain a datetime
index and a value.
"""
deaths = fetch_deaths()
cases = fetch_cases()
assert country in deaths.index, f"{country} not in available countries. These are {sorted(deaths.index)}"
    # Some countries report sub areas (i.e. multiple rows per country) such as China, France, United Kingdom
    # and Denmark. In that case, we sum over all regions (by summing over the relevant rows).
tmp = deaths.loc[country]
if len(tmp.shape) == 1: # most countries (Germany, Italy, ...)
d = tmp
elif len(tmp.shape) == 2: # China, France, United Kingdom, ...
d = tmp.drop(columns=['Province/State']).sum()
d.rename("deaths", inplace=True)
else:
raise ValueError("Unknown data set structure for deaths {country}:", tmp)
tmp = cases.loc[country]
if len(tmp.shape) == 1:
c = tmp
elif len(tmp.shape) == 2:
c = tmp.drop(columns=['Province/State']).sum()
c.rename("cases", inplace=True)
else:
raise ValueError("Unknown data set structure for cases {country}:", tmp)
# make date string into timeindex
d.index = pd.to_datetime(d.index, errors="coerce", format="%m/%d/%y")
c.index = pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y")
# drop all rows that don't have data
# sanity check: how many do we drop?
if c.index.isnull().sum() > 3:
print(f"about to drop {c.index.isnull().sum()} entries due to NaT in index", c)
c = c[c.index.notnull()]
if d.index.isnull().sum() > 3:
print(f"about to drop {d.index.isnull().sum()} entries due to NaT in index", d)
d = d[d.index.notnull()]
    # check there are no NaNs in the data
assert c.isnull().sum() == 0, f"{c.isnull().sum()} NaNs in {c}"
assert d.isnull().sum() == 0, f"{d.isnull().sum()} NaNs in {d}"
# label data
c.name = country + " cases"
d.name = country + " deaths"
return c, d
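# Editorial usage sketch (assumes network access to the Johns Hopkins CSVs):
# fetch the cumulative series for one country and inspect the latest values.
def _example_country_series(country="Germany"):
    cases, deaths = get_country_data_johns_hopkins(country)
    return cases.tail(), deaths.tail()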
def get_US_region_list():
"""return list of strings with US state names"""
deaths = fetch_deaths_US()
return list(deaths.groupby("Province_State").sum().index)
def get_region_US(state, county=None, debug=False):
"""Given a US state name and county, return deaths and cases as a tuple of pandas time
series. (Johns Hopkins data set)
    If county is None, then sum over all counties in that state (i.e. return
    the numbers for the state).
"""
    if county is not None:
raise NotImplementedError("Can only process US states (no counties)")
deaths = fetch_deaths_US()
cases = fetch_cases_US()
assert state in deaths['Province_State'].values, \
f"{state} not in available states. These are {sorted(set(deaths['Province_State']))}"
if county is None:
tmpd = deaths.groupby('Province_State').sum()
d = tmpd.loc[state]
tmpc = cases.groupby('Province_State').sum()
c = tmpc.loc[state]
else:
raise NotImplementedError("Can't do counties yet.")
    # Some countries report sub areas (i.e. multiple rows per country) such as China, France, United Kingdom
    # and Denmark. In that case, we sum over all regions.
# make date string into timeindex
d.index = pd.to_datetime(d.index, errors="coerce", format="%m/%d/%y")
c.index = pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y")
# drop all rows that don't have data
# sanity check: how many do we drop?
if c.index.isnull().sum() > 3:
if debug:
print(f"about to drop {c.index.isnull().sum()} entries due to NaT in index", c)
c = c[c.index.notnull()]
if d.index.isnull().sum() > 3:
if debug:
print(f"about to drop {d.index.isnull().sum()} entries due to NaT in index", d)
d = d[d.index.notnull()]
    # check there are no NaNs in the data
assert c.isnull().sum() == 0, f"{c.isnull().sum()} NaNs in {c}"
assert d.isnull().sum() == 0, f"{d.isnull().sum()} NaNs in {d}"
# label data
country = f"US-{state}"
c.name = country + " cases"
d.name = country + " deaths"
return c, d
def compose_dataframe_summary(cases, deaths):
"""Used in per-country template to show data table.
Could be extended.
Expects series of cases and deaths (time-aligned), combines those in DataFrame and returns it
"""
df = pd.DataFrame()
df["total cases"] = cases
df["daily new cases"] = cases.diff()
if deaths is not None:
df["total deaths"] = deaths
df["daily new deaths"] = deaths.diff()
    # drop first row with nan -> otherwise ints are shown as float in table
df = df.dropna().astype(int)
# change index: latest numbers shown first
df = df[::-1]
return df
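# Editorial sketch with synthetic data (not real case counts): shows how the
# summary table is derived from cumulative series via diff(), newest row first.
def _example_compose_summary():
    idx = pd.date_range("2020-03-01", periods=5)
    cases = pd.Series([10, 15, 22, 30, 41], index=idx)
    deaths = pd.Series([0, 1, 1, 2, 3], index=idx)
    return compose_dataframe_summary(cases, deaths)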
@joblib_memory.cache
def fetch_data_germany_last_execution():
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_data_germany(include_last_day=True) -> pd.DataFrame:
"""Fetch data for Germany from Robert Koch institute and return as a pandas
DataFrame with the `Meldedatum` as the index.
Data source is https://npgeo-corona-npgeo-de.hub.arcgis.com . The text on that
webpage implies that the data comes from the Robert Koch Institute.
As an option (`include_last_day`), we can omit the last day with data from
the retrieved data sets (see reasoning below in source), as the data is
    commonly updated one day later with more accurate (and typically higher)
numbers.
"""
# outdated: datasource = "https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv"
datasource = rki_data
t0 = time.time()
print(f"Please be patient - downloading data from {datasource} ...")
germany = pd.read_csv(datasource)
delta_t = time.time() - t0
print(f"Completed downloading {len(germany)} rows in {delta_t:.1f} seconds.")
## create new column 'landkreis' and get rid of "SK " and "LK " for this
## - this is too simplistic. We have fields like "Region Hannover"
# germany['landkreis'] = germany['Landkreis'].apply(lambda s: s[3:])
# (at least) the last data from the Robert-Koch-Institute (RKI) seems not to be
# fully reported the day after. For example, on 3 April, the number of cases
# from RKI is well below what is expected. Example:
#
# From RKI (as of evening of 2020-04-03:)
# 2020-03-29 62653
# 2020-03-30 66692
# 2020-03-31 72333
# 2020-04-01 77464
# 2020-04-02 79625
#
# From Johns Hopkins (as of evening of 2020-04-03:):
# 2020-03-29 62095
# 2020-03-30 66885
# 2020-03-31 71808
# 2020-04-01 77872
# 2020-04-02 84794
#
# So we must assume that the RKI data will be corrected later; maybe the next day.
#
# To make our plots not inaccurate, we'll remove the last data point from the RKI data:
g2 = germany.set_index(pd.to_datetime(germany['Meldedatum']))
g2.index.name = 'date'
# get rid of last day in data if desired
if include_last_day == False:
last_day = g2.index.max()
sel = g2.index == last_day
cleaned = g2.drop(g2[sel].index, inplace=False)
else:
cleaned = g2
fetch_data_germany_last_execution()
return cleaned
def pad_cumulative_series_to_yesterday(series):
"""Given a time series with date as index and cumulative cases/deaths as values:
- if the last date in the index is older than yesterday, then
- add that date
- resample the series with a daily interval, using padding with last known value
- and return.
Required for <NAME> Data, where only a new data point is provided if
the numbers change, but the plotting algorithms need to know that there is
no change. Without this padding, the data set looks old as the last plotted
data point is the last one for which data is provided.
"""
now = datetime.datetime.now()
rki_tz = pytz.timezone('Europe/Berlin')
now_tz = datetime.datetime.now(rki_tz)
# remove time zone information from datetime, so we can compare against
    # datetime dates from get_country_data which has no timezone information
# attached.
now = now.replace(tzinfo=None)
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
yesterday = today - pd.Timedelta("1D")
last = series.index.max()
if last < yesterday:
# repeat last data point with index for yesterday
series[yesterday] = series[last]
series2 = series.resample("1D").fillna(method="ffill")
return series2
else:
return series
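# Editorial sketch with synthetic data: a cumulative series whose last report
# is ten days old is padded forward to yesterday, so plots do not look stale.
def _example_pad_to_yesterday():
    end = datetime.datetime.now().replace(hour=0, minute=0, second=0,
                                          microsecond=0) - pd.Timedelta("10D")
    series = pd.Series([1, 2, 2, 5], index=pd.date_range(end=end, periods=4))
    return pad_cumulative_series_to_yesterday(series).tail()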
def germany_get_region(state=None, landkreis=None, pad2yesterday=False):
""" Returns cases and deaths time series for Germany, and a label for the state/kreis.
If state is given, return sum of cases (as function of time) in that state (state=Bundesland)
If Landkreis is given, return data from just that Landkreis.
Landkreis seems unique, so there is no need to provide state and Landkreis.
    [Should tidy up names here; maybe go to region and subregion in the function argument name, and
    translate later.]
    Returns two time series: (cases, deaths).
    """
    germany = fetch_data_germany()
assert state or landkreis, "Need to provide a value for state or landkreis"
if state and landkreis:
raise NotImplementedError("Try to use 'None' for the state.")
# TODO: We need to check if this is important.
if state:
if not state in germany['Bundesland'].values:
raise Exception(
f"{state} not in available German states. These are "
f"{sorted(germany['Bundesland'].drop_duplicates())}"
)
land = germany[germany['Bundesland'] == state]
land = land.set_index(pd.to_datetime(land['Meldedatum']))
land.index.name = 'date'
land.sort_index(inplace=True)
# group over multiple rows for the same date
# (this will also group over the different landkreise in the state)
cases = land["AnzahlFall"].groupby('date').agg('sum').cumsum()
region_label = f'Germany-{state}'
cases.name = region_label + " cases"
# group over all multiple entries per day
deaths = land["AnzahlTodesfall"].groupby('date').agg('sum').cumsum()
deaths.name = region_label + " deaths"
if pad2yesterday:
deaths = pad_cumulative_series_to_yesterday(deaths)
cases = pad_cumulative_series_to_yesterday(cases)
return cases, deaths
if landkreis:
assert landkreis in germany['Landkreis'].values, \
f"{landkreis} not in available German districts. These are {sorted(germany['Landkreis'].drop_duplicates())}"
lk = germany[germany["Landkreis"] == landkreis]
lk.index = pd.to_datetime(lk['Meldedatum'])
lk.index.name = 'date'
lk = lk.sort_index()
cases = lk["AnzahlFall"].groupby('date').agg('sum').cumsum()
region_label = f'Germany-{landkreis}'
cases.name = region_label + ' cases'
deaths = lk["AnzahlTodesfall"].groupby('date').agg('sum').cumsum()
deaths.name = region_label + ' deaths'
if pad2yesterday:
deaths = pad_cumulative_series_to_yesterday(deaths)
cases = pad_cumulative_series_to_yesterday(cases)
return cases, deaths
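# Editorial usage sketch (requires the RKI download above to succeed; the state
# and Landkreis names are examples and must exist in the RKI table).
def _example_germany_region():
    cases_state, deaths_state = germany_get_region(state="Bayern",
                                                   pad2yesterday=True)
    cases_lk, deaths_lk = germany_get_region(landkreis="SK Hamburg")
    return cases_state.tail(), cases_lk.tail()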
@joblib_memory.cache
def fetch_csv_data_from_url(source) -> pd.DataFrame:
"""Given a URL, fetch the csv using pandas. Put into separate function (from germany_get_population)
to avoid repeated download of file (for better performance)."""
data = | pd.read_csv(source) | pandas.read_csv |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
from numpy import nan
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.types.missing import isnull, notnull
from pandas.types.common import _ensure_platform_int
from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
from pandas.core.generic import NDFrame
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
import pandas.core.ops as ops
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame
index : array-like, optional
    columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
"""
_constructor_sliced = SparseSeries
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if isinstance(data, dict):
mgr = self._init_dict(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = _ensure_index(columns)
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
else:
columns = Index(_try_sort(list(data.keys())))
if index is None:
index = extract_index(list(data.values()))
sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True)
sdict = DataFrame()
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = sp_maker(v.values)
else:
if isinstance(v, dict):
v = [v.get(i, nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_vec = np.empty(len(index))
nan_vec.fill(nan)
for c in columns:
if c not in sdict:
sdict[c] = sp_maker(nan_vec)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
data = _prep_ndarray(data, copy=False)
N, K = data.shape
if index is None:
index = _default_index(N)
if columns is None:
columns = _default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
raise ValueError('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
return self._init_dict(data, index, columns, dtype)
def __array_wrap__(self, result):
return SparseDataFrame(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = | _unpickle_array(cols) | pandas.io.pickle._unpickle_array |
# main functions for the conus project
# TODO: move the main_0 codes to the download folder,
# TODO: or add them to the main workflow
# TODO: add options for EV of gauges and trmm-domain
# TODO: separate QRF results and plots
# TODO: check that mev_s_all, years and hdf quantiles are consistent
# TODO: fix or remove the multiprocessing code
# TODO: for testing: move sample data to its own folder
# TODO: improve testing
# TODO: speed up EV analysis - multiprocessing
import os
import h5py
import dask.array as da
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import downscale as down
from datetime import datetime
import xarray as xr
# project directories
# tmpa_dir = os.path.join('..', 'data', 'tmpa_conus_data')
tmpa_dir = os.path.join('..', 'data', 'tmpa_conus_data')
outdir_data = os.path.join('..', 'output', 'pixel_stats')
outplot = os.path.join('..', 'output', 'outplot')
# list_gauges_dir = os.path.join(tmpa_dir, 'gauges_noaa_hpd')
list_gauges_dir = os.path.join('..', 'data', 'data_noaa_hpd_gauges')
gauges_dir = os.path.join(list_gauges_dir, 'daily_csv')
stat_list_file = os.path.join(list_gauges_dir, 'HOURLY_LOC_NYEARS.csv')
tmpa_hdf_file = os.path.join(tmpa_dir, 'data_tmpa_3h.hdf5')
pickletemp = os.path.join('..','output','pickletemp')
elev_dir = os.path.join('..', 'data', 'data_elevation')
#### QUANTITIES TO SET FOR ANALYSIS: ####
Tr = np.array([10, 20, 50, 100]) # return times for extreme value analysis
evd_domain = 'world' # (can be = 'conus' or 'world' for EV analysis only)
do_evd_all_gauges = True # do EV for all gauges in the dataset
# do_evd_all_tmpa = True # do EV for all pixels in evd_domain (WORLD or CONUS)
do_trmm_evd = True # to fit MEV to each grid cell over CONUS
do_trmm = True # to downscale trmm where gauges are available
do_gauges = True # to compute gauge stats where there are enough
################# added for reading elevation::
# Boundaries of the CONUS domain and TRMM grid coordinates
solat = 22 # south bound
nolat = 50 # north
welon = -130 # west
ealon = - 60 # east
dx = 0.25
lats = np.arange(-49.875, 49.876, 0.25) # south to North
lons = np.arange(-179.875, 179.876, 0.25) # West to East
nlats = np.size(lats)
nlons = np.size(lons)
#################
# kwargs in input for the function analyze_cell
pixelkwargs = {
'npix':3, # box size for analysis, in number of grid cells
'npix_gauges':5, # size of the box for selecting gauges, in grid cells
'dx':dx, # size of a grid cell (dx = 0.25 for TMPA)
'minstat_bb':4, # number of gauges required over npix_gauges**2 area
'minstat_pixel':1, # number of gauges required over the grid cell
'min_nyears_pixel':10, # min record length (years) to select a gauge
'min_overlap_corr':2000, # compute correlation if at least 2000 obs.
'thresh':1, # [mm] precipitation magnitude threshold
'L0':0.0001, # rain gauge point scale [km]
'acf':'mar', # autocorrelation function used
'dt':3, # hours - timescale of TMPA dataset
'plot':False, # plot results (set False when running on the Cluster)
'tscale':24, # daily timescale to perform analysis
'save_yearly':True, # save yearly Weibull parameters
'toll':0.005, # for optimization algorithm for correlation downscaling
'maxmiss':36, # max number of missing daily data in each year of record
'opt_method':'genetic', # opt algorithm for corr. downscaling
'do_trmm_evd':do_trmm_evd, # to fit MEV to each grid cell over CONUS
'do_gauges': do_gauges, # to compute gauge stats where there are enough
'do_trmm': do_trmm, # to downscale trmm where gauges are available
'do_smoke': False # to test the pixel positions
}
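# Editorial sketch (not part of the original workflow): pixelkwargs is meant to
# be unpacked into analyze_cell() for a single grid cell. The coordinates below
# are an arbitrary on-grid example inside CONUS; the helper name and the
# index-to-coordinate ordering are assumptions for illustration only.
def _example_single_cell_run():
    clat, clon = 35.125, -100.125
    i = int(np.argmin(np.abs(lons - clon)))   # grid indices (ordering assumed)
    j = int(np.argmin(np.abs(lats - clat)))
    return analyze_cell(i, j, clon, clat, Tr, stat_list_file,
                        tmpa_hdf_file, gauges_dir, **pixelkwargs)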
def tmpa_evd(clon, clat, tmpa_hdf_file, Tr, *,
thresh=1, maxmiss=36):
""" extreme value analysis for a tmpa grid cell pixel
load the pixel centered in (clon, clat) from the
dask - xarray stored in the hdf file named tmpa_hdf_file,
and compute quantiles for the range of return times in the array Tr
    optional: thresh = 1 threshold for MEV
    maxmiss = 36 max number of missing data / year
    Do not perform analysis if dry years have fewer than 2 events"""
res_evd = {}
Fi = 1 - 1 / Tr
xconus = read_gridded_data(tmpa_hdf_file)
# xdata = load_bounding_box(xconus, clon, clat, 1)
# print(xdata.shape)
xconus = xconus.where(xconus >= -0.001)
xpixel = xconus.sel(lat=clat, lon=clon).resample(time='D').sum(
dim='time', skipna=False).dropna(dim='time',
how='any').load()
ts = xpixel.values
years = xpixel.time.dt.year.values
df = pd.DataFrame({'PRCP': ts, 'YEAR': years})
df, ny2, ny1 = down.remove_missing_years(df, maxmiss)
Ny, Cy, Wy = down.mev_fit(df, thresh=thresh)
x0 = 9.0 * np.mean(Cy)
mevq = down.mev_quant(Fi, x0, Ny, Cy, Wy, thresh=thresh)[0]
XIemp, Fiemp, TRemp = down.tab_rain_max(df)
csi, psi, mu = down.gev_fit_lmom(XIemp) # fit to annual maxima
gevq = down.gev_quant(Fi, csi, psi, mu)
res_evd['mev_s_all'] = mevq
res_evd['gev_s_all'] = gevq
return res_evd
def gauge_evd(Tr, gauges_dir, stat_list_file, output_dir,
*, nyears_min=10, maxmiss=36, thresh=1):
'''----------------
compute evd statistics for all gauges in the dataset
with enough complete years of data
----------------'''
sdf = pd.read_csv(stat_list_file, index_col = 0)
nstats = np.shape(sdf)[0]
ntr = np.size(Tr)
Fi = 1-1/Tr
nyearsg = np.zeros(nstats, dtype = int)
mev_g_all = np.zeros((nstats, ntr))*np.nan
gev_g_all = np.zeros((nstats, ntr))*np.nan
for i in range(nstats):
df0 = pd.read_csv( os.path.join(gauges_dir,
'{}.csv'.format(sdf['ID'].values[i])))
df = df0[(df0['PRCP'] > -0.1) & (~np.isnan(df0['PRCP']))]
df, ny2, ny1 = down.remove_missing_years(df, maxmiss)
XIemp, Fiemp, TRemp = down.tab_rain_max(df)
nyearsg[i] = ny2
if nyearsg[i] >= nyears_min:
Ny, Cy, Wy = down.mev_fit(df, thresh=thresh)
x0 = 6.0*np.mean(Cy)
mev_g_all[i, :] = down.mev_quant(Fi, x0, Ny, Cy, Wy,
thresh=thresh)[0]
csi, psi, mu = down.gev_fit_lmom(XIemp)
gev_g_all[i,:] = down.gev_quant(Fi, csi, psi, mu)
# subset dataframe keeping only long enough time series:
sdf['nyearsg'] = nyearsg
for i in range(ntr):
sdf['mev_{}'.format(Tr[i])] = mev_g_all[:,i]
sdf['gev_{}'.format(Tr[i])] = gev_g_all[:,i]
sdf = sdf[sdf['nyearsg'] >= nyears_min]
ngauges = np.shape(sdf)[0]
sdf.to_csv(os.path.join(output_dir, 'dfres_gauges_{}.csv'.format(ngauges)))
return sdf
def gauge_stats(clon, clat, df, Tr, gauges_dir, *, npix=5, dx=0.25,
minstat_bb=4,
minstat_pixel=1, thresh=1,
min_nyears_pixel=10, min_overlap_corr=2000,
maxmiss=36):
'''------------------------------------------------------------------------
gauge_stats:
    Computes the statistics for the longest-record gauge in the pixel (clon, clat),
    provided its record is at least min_nyears_pixel years long,
    and computes the correlation between gauges in an npix*npix bounding box
    if there are at least minstat_bb gauges with a common record of at least
    min_overlap_corr observations.
    Returns a dictionary with the correlation parameters, the local gauge Weibull C, W, N,
    and flags indicating whether there were enough gauges in the pixel / bounding box.
    INPUT:
    clon = longitude of the central pixel point
    clat = latitude of the central pixel point
    df = data frame with the list of stations, extracted from NOAA HPD, daily scale
------------------------------------------------------------------------'''
# default values of output variables if not enough stations at the ground:
enough_gauges_bb = False
enough_gauges_pixel = False
alpha = np.nan
epsilon = np.nan
d0 = np.nan
mu0 = np.nan
pwet = np.nan
C = np.nan
W = np.nan
N = np.nan
# Cy = np.zeros(min_nyears_pixel)*np.nan
# Wy = np.zeros(min_nyears_pixel)*np.nan
# Ny = np.zeros(min_nyears_pixel)*np.nan
gam_g = np.nan
nyearsg = np.nan
mev_g = np.zeros(np.size(Tr))*np.nan
# read stations within the box
wb = clon - npix/2*dx
eb = clon + npix/2*dx
nb = clat + npix/2*dx
sb = clat - npix/2*dx
wbpix = clon - 1/2*dx # pixel
ebpix = clon + 1/2*dx
nbpix = clat + 1/2*dx
sbpix = clat - 1/2*dx
# stations within the central pixel and the bounding box
mydf = df[ (df['LAT'] < nb) & (df['LAT'] > sb) &
(df['LON'] > wb) & (df['LON'] < eb) ]
mydfc = df[ (df['LAT'] < nbpix) & (df['LAT'] > sbpix)
& (df['LON'] > wbpix) & (df['LON'] < ebpix) ]
    nstations_bb = np.shape(mydf)[0] # number of stations in the bounding box
    nstations_pixel = np.shape(mydfc)[0] # number of stations in the central pixel
# compute empirical correlation
if nstations_bb >= minstat_bb:
vdist = []
vcorr = []
for iii in range(nstations_bb):
dfi0 = pd.read_csv( os.path.join(gauges_dir,
'{}.csv'.format(mydf['ID'].values[iii])))
dfi = dfi0[(dfi0['PRCP'] > -0.1) & (~np.isnan(dfi0['PRCP']))]
dates_ii = dfi['DATE'].values
for jjj in range(iii + 1, nstations_bb):
dfj0 = pd.read_csv( os.path.join(gauges_dir,
'{}.csv'.format(mydf['ID'].values[jjj])))
dfj = dfj0[(dfj0['PRCP'] > -0.1) & (~np.isnan(dfj0['PRCP']))]
dates_jj = dfj['DATE'].values
commondates = np.intersect1d(dates_ii, dates_jj)
sample_ii = dfi['PRCP'].values[dfi['DATE'].isin(commondates)]
sample_jj = dfj['PRCP'].values[dfj['DATE'].isin(commondates)]
if np.size(sample_ii) > min_overlap_corr:
excesses_ii = np.maximum(sample_ii - thresh, 0.0)
excesses_jj = np.maximum(sample_jj - thresh, 0.0)
vcorr.append(np.corrcoef(excesses_ii, excesses_jj)[0,1])
vdist.append(down.haversine(mydf['LAT'].values[iii],
mydf['LAT'].values[jjj],
mydf['LON'].values[iii],
mydf['LON'].values[jjj]
))
# fit acf function
if len(vdist) >= minstat_bb:
try:
popt0, pcov0 = curve_fit(down.epl_fun,
np.array(vdist), np.array(vcorr),
p0 = np.array([50.0, 1.0]),
bounds = ((0.0, 0.0), (+np.inf, +np.inf)))
epsilon = popt0[0]
alpha = popt0[1]
enough_gauges_bb = True
L = down.area_lat_long(clat, clon, dx, dx)[0]
L0 = 0.0001
gam_g = down.vrf(L, L0, (epsilon, alpha), acf='mar')
popt1, pcov1 = curve_fit(down.str_exp_fun,
np.array(vdist), np.array(vcorr),
p0 = np.array([50.0, 1.0]),
bounds = ((0.0, 0.0), (+np.inf, +np.inf)))
d0 = popt1[0]
mu0 = popt1[1]
except:
print('gauge_stats WARNING: \n'
'pass - not possible to compute correlation reliably')
# fit Weibull to the longest station in the central pixel
if nstations_pixel >= minstat_pixel:
vec_nyears = mydfc['NYEARS'].values
if np.max(vec_nyears) >= min_nyears_pixel: # at least 10 years of data
# enough_gauges_pixel = True
long_index = np.argmax(mydfc['NYEARS'].values)
dfl0 = pd.read_csv( os.path.join(gauges_dir,
'{}.csv'.format(mydfc['ID'].values[long_index])))
dfl = dfl0[ (dfl0['PRCP'] > -0.1) & (~np.isnan(dfl0['PRCP']) )]
sample = dfl['PRCP'].values
excesses = sample[sample > thresh] - thresh
NCWg = down.wei_fit(excesses)
pwet = NCWg[0]/np.size(sample)
C = NCWg[1]
W = NCWg[2]
            N = int(np.rint(pwet*365.25))  # builtin int (np.int is deprecated)
# fit MEV
# TODOS: add option to save yearly parameters here if needed
dfl, ny2, ny1 = down.remove_missing_years(dfl, maxmiss)
if ny2 >= min_nyears_pixel:
enough_gauges_pixel = True
Ny, Cy, Wy = down.mev_fit(dfl, thresh=thresh)
# nyearsg = np.size(Ny)
Fi = 1-1/Tr
x0 = 6.0*np.mean(Cy)
mev_g = down.mev_quant(Fi, x0, Ny, Cy, Wy, thresh=thresh)[0]
# save results in dictionary:
res_gauges = {'Cg':C,
'Wg':W,
'Ng':N,
'pwg':pwet,
'enough_gauges_pixel':enough_gauges_pixel,
'enough_gauges_bb':enough_gauges_bb,
'ngauges_bb':nstations_bb,
'ngauges_pixel':nstations_pixel,
'nyears_gauge':nyearsg,
'alp_g':alpha,
'eps_g':epsilon,
'd0_g':d0,
'mu0_g':mu0,
'Tr':Tr,
'gam_g':gam_g,
'mev_g':mev_g
}
return res_gauges
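# Editorial sketch with synthetic correlations (the decay below is invented for
# demonstration): this mirrors the curve_fit / vrf calls used inside
# gauge_stats to turn an inter-gauge correlation cloud into a variance
# reduction factor for one pixel. The helper name is hypothetical.
def _example_acf_fit(clat=35.125, clon=-100.125):
    vdist = np.linspace(1.0, 120.0, 30)       # inter-gauge distances [km]
    vcorr = np.exp(-vdist / 60.0)             # made-up correlation decay
    popt, _ = curve_fit(down.epl_fun, vdist, vcorr,
                        p0=np.array([50.0, 1.0]),
                        bounds=((0.0, 0.0), (np.inf, np.inf)))
    L = down.area_lat_long(clat, clon, dx, dx)[0]
    return down.vrf(L, 0.0001, (popt[0], popt[1]), acf='mar')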
def read_gridded_data(tmpa_hdffile):
# f = h5py.File(os.path.join(datadir, 'data_tmpa_3h.hdf5'), "r")
f = h5py.File(tmpa_hdffile, "r")
# print(list(f.keys()))
tmpalat = f['lat'][:]
tmpalon = f['lon'][:]
dates_int = f['dates'][:]
hours_int = f['hours'][:]
dset = f['prcp']
# print('dataset shape = {}'.format(dset.shape))
x = da.from_array(dset, chunks=(6, 6, 300))
dates = [datetime.strptime(str(integd)+str(inthour), '%Y%m%d%H')
for integd, inthour in zip(dates_int, hours_int)] # UTC time
xconus = xr.DataArray(x,
coords={'lon':tmpalon, 'lat':tmpalat, 'time':dates},
dims=('lon', 'lat', 'time'))
return xconus
def load_bounding_box(xconus, clon, clat, npix, dropna = False):
''' load data within the bounding box in memory
from an out-of-memory xarray + dask array
    DOES NOT REMOVE MISSING DATA, BUT SETS THEM TO NANS'''
xconus = xconus.where(xconus >= -0.001)
lons = xconus.lon.values
dx = np.abs(lons[1] - lons[0])
buffer = 0.50*npix*dx
    eps = 1e-4 # to make sure to include boundaries -> add an eps buffer
solat = clat - buffer + eps
nolat = clat + buffer + eps
ealon = clon + buffer + eps
welon = clon - buffer + eps
bcond = np.logical_and(
np.logical_and( xconus.lat > solat, xconus.lat < nolat),
np.logical_and( xconus.lon > welon, xconus.lon < ealon))
# Load in memory the bounding box of interest
    if dropna:
        xdata = xconus.where(bcond, drop = True
                             ).dropna(dim='time', how='any').load()
    else:
        xdata = xconus.where(bcond, drop = True).load()
return xdata
def analyze_cell(i, j, clon, clat, Tr, stat_list_file, tmpa_hdf_file,
gauges_dir, *,
npix=3, npix_gauges=5, dx=0.25,
minstat_bb=4, minstat_pixel=1,
min_nyears_pixel=10, min_overlap_corr=2000,
thresh=1,
L0=0.0001,
acf='mar', dt=3, plot=False, tscale=24,
save_yearly = True, toll=0.005, maxmiss=36,
opt_method='genetic',
do_smoke = True,
do_trmm = True,
do_gauges = True,
do_trmm_evd = True):
'''------------------------------------------------------------------------
analyze gauge data and gridded qpes for a bouding box of size npix
centered in clat, clon (indexes i, j respectively)
sdf = list of station coordinates and names
------------------------------------------------------------------------'''
# compute some basic statistics, as NCW
res_smoke = {}
if do_smoke:
res_smoke['clon'] = clon
res_smoke['clat'] = clat
res_evd = {}
if do_trmm_evd:
res_evd = tmpa_evd(clon, clat, tmpa_hdf_file, Tr,
thresh=thresh, maxmiss=maxmiss)
res_gauges = {}
res_tmpa = {}
if do_gauges:
sdf = | pd.read_csv(stat_list_file, index_col=0) | pandas.read_csv |
import pandas as pd
def get_zipcode(lat, lon, all_l):
row = all_l[(all_l['latitude'] == lat) & (all_l['longitude'] == lon)]
print(row)
print("*")
if __name__ == "__main__":
root_path = "/Users/shravya/Documents/CMU/Interactive_Data_Science/Assignments/3/Code2/data/"
reviews = {'NYC': pd.read_csv(root_path + 'NYC_reviews.csv')}
NYC_listings = {'01': pd.read_csv(root_path + '2020/NYC/listings_01.csv'),
'02': pd.read_csv(root_path + '2020/NYC/listings_02.csv'),
'03': pd.read_csv(root_path + '2020/NYC/listings_03.csv'),
'04': | pd.read_csv(root_path + '2020/NYC/listings_04.csv') | pandas.read_csv |
import re
import requests
import pandas as pd
import os
from unidecode import unidecode
from bs4 import BeautifulSoup, SoupStrainer
def get_data(urls, token, name):
"""
Extract data from a set of HTML pages, and convert them into a .csv file.
Args:
urls ([list]): [List containing all URLs]
Yields:
listes_joueurs[csv]: [CSV file containg informations about players]
"""
only_player_search = SoupStrainer(id="site")
def clean_fmt(soup):
forms = soup.findAll('form')
for match in forms:
match.decompose()
raw = str(soup.select("div.data"))
raw = re.sub(r"(<.*?>)*?", '', raw)
raw = raw.replace("(Penalty)","").replace("(", f"\n\n\ndddddd").replace(")","")
raw_list = list(raw.split("\n\n\n"))
int_list = []
for r in raw_list:
r = re.sub(r'\n', "", r)
r = unidecode(r)
int_list.append(r)
clean_list = []
for i in range(0, len(int_list)):
if len(int_list[i]) < 3:
pass
else:
clean_list.append(int_list[i])
clean_list.pop(0)
clean_list = [l.replace("Olympique Lyon", "OL").replace("Girondins Bordeaux","FCGB").replace("SC Bastia", "SCB")\
.replace("Paris Saint-Germain","PSG").replace("Toulouse FC", "TFC").replace("FC Metz", "FCM").replace("AS Saint-Etienne","SE")\
.replace("EA Guingamp", "EAG").replace("AS Monaco", "ASM").replace("ESTAC Troyes","ESTAC").replace("FC Nantes", "FCN")\
.replace("AJ Auxerre","AJA").replace("CS Sedan","CSS").replace("Stade Rennes", "SRFC").replace("Olympique Marseille", "OM")\
.replace("RC Lens", "RCL").replace("RC Strasbourg", "RCS").replace("Lille OSC", "LOSC").replace("Montpellier HSC", "MHSC")\
.replace("FC Sochaux", "FCSM").replace("FC Lorient", "FCL").replace("Le Havre AC", "HAC").replace("dddddd","") for l in clean_list]
clean_name = []
clean_country = []
clean_club = []
clean_goal = []
for i in range(0, int((len(clean_list)/4))):
clean_name.append(clean_list[i*4])
clean_country.append(clean_list[1+(i*4)])
clean_club.append(clean_list[2+(i*4)])
clean_goal.append(clean_list[3+(i*4)])
s1 = pd.Series(clean_name, name='name')
s2 = pd.Series(clean_country, name='country')
    s3 = pd.Series(clean_club, name='club')
    s4 = pd.Series(clean_goal, name='goal')
df = | pd.concat([s1, s2, s3, s4], axis=1) | pandas.concat |
#pylint: disable=too-many-lines
"""Wells and WellSegment components."""
from copy import deepcopy
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
from anytree import (RenderTree, AsciiStyle, Resolver, PreOrderIter, PostOrderIter,
find_by_attr)
from .well_segment import WellSegment
from .rates import calculate_cf, show_rates, show_rates2, show_blocks_dynamics
from .grids import OrthogonalUniformGrid
from .base_component import BaseComponent
from .utils import full_ind_to_active_ind, active_ind_to_full_ind
from .getting_wellblocks import defining_wellblocks_vtk, find_first_entering_point, defining_wellblocks_compdat
from .wells_dump_utils import write_perf, write_events, write_schedule, write_welspecs
from .wells_load_utils import (load_rsm, load_ecl_binary, load_group, load_grouptree,
load_welspecs, load_compdat, load_wconprod, load_wconinje,
load_welltracks, load_events, load_history,
DEFAULTS, VALUE_CONTROL)
from .decorators import apply_to_each_segment, state_check
class IterableWells:
"""Wells iterator."""
def __init__(self, root):
self.iter = PreOrderIter(root)
def __next__(self):
x = next(self.iter)
if x.is_group:
return next(self)
return x
class Wells(BaseComponent):
"""Wells component.
Contains wells and groups in a single tree structure, wells attributes
and preprocessing actions.
Parameters
----------
node : WellSegment, optional
Root node for well's tree.
"""
def __init__(self, node=None, **kwargs):
super().__init__(**kwargs)
self._root = WellSegment(name='FIELD', is_group=True) if node is None else node
self._resolver = Resolver()
self.init_state(has_blocks=False,
has_cf=False,
spatial=True,
all_tracks_complete=False,
all_tracks_inside=False,
full_perforation=False)
def copy(self):
"""Returns a deepcopy. Cached properties are not copied."""
copy = self.__class__(self.root.copy())
copy._state = deepcopy(self.state) #pylint: disable=protected-access
for node in PreOrderIter(self.root):
if node.is_root:
continue
node_copy = node.copy()
node_copy.parent = copy[node.parent.name]
return copy
@property
def root(self):
"""Tree root."""
return self._root
@property
def resolver(self):
"""Tree resolver."""
return self._resolver
@property
def main_branches(self):
"""List of main branches names."""
return [node.name for node in self if node.is_main_branch]
@property
def names(self):
"""List of well names."""
return [node.name for node in self]
@property
def event_dates(self):
"""List of dates with any event in main branches."""
return self._collect_dates('EVENTS')
@property
def result_dates(self):
"""List of dates with any result in main branches."""
return self._collect_dates('RESULTS')
@property
def history_dates(self):
"""List of dates with any history in main branches."""
return self._collect_dates('HISTORY')
def _collect_dates(self, attr):
"""List of common dates given in the attribute of main branches."""
agg = [getattr(node, attr).DATE for node in self if node and attr in node]
if not agg:
return pd.to_datetime([])
dates = sorted( | pd.concat(agg) | pandas.concat |
import multiprocessing
from typing import NamedTuple
import numpy as np
import pandas as pd
from .morpho import bounding_parallelogram, ImageMoments, ImageMorphology
class Morphometrics(NamedTuple):
"""Measured shape attributes of an image.
- area: Total area/image mass.
- length: Length of the estimated skeleton.
- thickness: Mean thickness along the skeleton.
- slant: Horizontal shear, in radians.
- width: Width of the bounding parallelogram.
- height: Height of the bounding parallelogram.
"""
area: float
length: float
thickness: float
slant: float
width: float
height: float
def measure_image(image, threshold: float = .5, scale: int = 4, bound_frac: float = .02,
verbose=True):
"""Computes morphometrics for a single image.
Parameters
----------
image : (H, W) array_like
Input image.
threshold : float, optional
A relative threshold between 0 and 1. The upsampled image will be binarised at this fraction
between its minimum and maximum values.
scale : int, optional
Upscaling factor for subpixel morphological analysis (>=1).
bound_frac : float, optional
Fraction of image mass to discard along each dimension when computing the bounding
parallelogram.
verbose : bool, optional
Whether to pretty-print the estimated morphometrics.
Returns
-------
Morphometrics
A namedtuple containing the measured area, length, thickness, slant, width, and height.
"""
image = np.asarray(image)
morph = ImageMorphology(image, threshold, scale)
moments = ImageMoments(morph.hires_image)
thickness = morph.mean_thickness
area = morph.area
length = morph.stroke_length
slant = np.arctan(-moments.horizontal_shear)
corners = bounding_parallelogram(morph.hires_image, bound_frac, moments)
width = (corners[1][0] - corners[0][0]) / morph.scale
height = (corners[-1][1] - corners[0][1]) / morph.scale
if verbose:
print(f"Area: {area:.1f}")
print(f"Length: {length:.1f}")
print(f"Thickness: {thickness:.2f}")
print(f"Slant: {np.rad2deg(slant):.0f}°")
print(f"Dimensions: {width:.1f} x {height:.1f}")
return Morphometrics(area, length, thickness, slant, width, height)
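# Editorial usage sketch: measuring a synthetic 28x28 binary "stroke" image. A
# real call would pass an MNIST-style digit; the bar below is only a stand-in
# so the example is self-contained.
def _example_measure_image():
    img = np.zeros((28, 28))
    img[6:22, 12:16] = 1.0                 # a thick vertical bar
    return measure_image(img, verbose=False)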
def _measure_image_unpack(arg):
return measure_image(*arg)
def measure_batch(images, threshold: float = .5, scale: int = 4, bound_frac: float = .02,
pool: multiprocessing.Pool = None, chunksize: int = 100) -> pd.DataFrame:
"""Computes morphometrics for a batch of images.
Parameters
----------
images : (N, H, W) array_like
Input image batch, indexed along the first dimension.
threshold : float, optional
A relative threshold between 0 and 1. The upsampled image will be binarised at this fraction
between its minimum and maximum values.
scale : int, optional
Upscaling factor for subpixel morphological analysis (>1).
bound_frac : float, optional
Fraction of image mass to discard along each dimension when computing the bounding
parallelogram.
pool : multiprocessing.Pool, optional
A pool of worker processes for parallel processing. Defaults to sequential computation.
chunksize : int
Size of the chunks in which to split the batch for parallel processing. Ignored if
`pool=None`.
Returns
-------
pandas.DataFrame
A data frame with one row for each image, containing the following columns:
- `area`: Total area/image mass.
- `length`: Length of the estimated skeleton.
- `thickness`: Mean thickness along the skeleton.
- `slant`: Horizontal shear, in radians.
- `width`: Width of the bounding parallelogram.
- `height`: Height of the bounding parallelogram.
Notes
-----
If the `tqdm` package is installed, this function will display a fancy progress bar with ETA.
Otherwise, it will print a plain text progress message.
"""
images = np.asarray(images)
args = ((img, threshold, scale, bound_frac, False) for img in images)
if pool is None:
gen = map(_measure_image_unpack, args)
else:
gen = pool.imap(_measure_image_unpack, args, chunksize=chunksize)
try:
import tqdm
gen = tqdm.tqdm(gen, total=len(images), unit='img', ascii=True)
except ImportError:
def plain_progress(g):
print(f"\rProcessing images: {0}/{len(images)}", end='')
for i, res in enumerate(g):
print(f"\rProcessing images: {i + 1}/{len(images)}", end='')
yield res
print()
gen = plain_progress(gen)
results = list(gen)
df = | pd.DataFrame(results) | pandas.DataFrame |
"""
Rank summarization results.
"""
import os
import sys
import time
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import sem
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from experiments import util as exp_util
from postprocess import util as pp_util
from config import rank_args
from rank.remove import get_mean_df
def process(args, exp_hash, out_dir, logger):
begin = time.time()
color, line, label = pp_util.get_plot_dicts()
df_loss_list = []
df_li_loss_list = []
df_acc_list = []
df_li_acc_list = []
df_auc_list = []
df_li_auc_list = []
for tree_type in args.tree_type:
in_dir = os.path.join(args.in_dir,
tree_type,
f'exp_{exp_hash}',
'summary')
for ckpt in args.ckpt:
ckpt_dir = os.path.join(in_dir, f'ckpt_{ckpt}')
# define paths
fp_loss = os.path.join(ckpt_dir, 'loss_rank.csv')
fp_li_loss = os.path.join(ckpt_dir, 'loss_rank_li.csv')
fp_acc = os.path.join(ckpt_dir, 'acc_rank.csv')
fp_li_acc = os.path.join(ckpt_dir, 'acc_rank_li.csv')
fp_auc = os.path.join(ckpt_dir, 'auc_rank.csv')
fp_li_auc = os.path.join(ckpt_dir, 'auc_rank_li.csv')
# check paths
assert os.path.exists(fp_loss), f'{fp_loss} does not exist!'
assert os.path.exists(fp_li_loss), f'{fp_li_loss} does not exist!'
assert os.path.exists(fp_acc), f'{fp_acc} does not exist!'
assert os.path.exists(fp_li_acc), f'{fp_li_acc} does not exist!'
assert os.path.exists(fp_auc), f'{fp_auc} does not exist!'
            assert os.path.exists(fp_li_auc), f'{fp_li_auc} does not exist!'
# read results
df_loss_list.append( | pd.read_csv(fp_loss) | pandas.read_csv |
import json
import traceback
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from scipy import stats
import rrcf
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import timezone
from django_pandas.io import read_frame
from django.core.exceptions import ObjectDoesNotExist
from datacollection.models import Event, URL, CustomSession, Player
from dataprocessing.models import Task
from kimchi.celery import app
from shadowspect.models import Level, Replay
from dashboard.views import create_player_map, get_completed_puzzles_map, get_puzzles_dict
from dataprocessing.elo import adaptedData, run
from dataprocessing.misconceptions import sequenceWithinPuzzlesForMisconceptions, tagMisconceptions
difficultyMapping = ['Sandbox~0.000001','1. One Box~0.000002', '2. Separated Boxes~0.111127', '3. Rotate a Pyramid~0.083447', '4. Match Silhouettes~0.061887', '5. Removing Objects~0.106021', '6. Stretch a Ramp~0.107035', '7. Max 2 Boxes~0.078039', '8. Combine 2 Ramps~0.068608', '9. Scaling Round Objects~0.128647',
'Square Cross-Sections~0.199714', 'Bird Fez~0.156674', 'Pi Henge~0.067346', '45-Degree Rotations~0.096715', 'Pyramids are Strange~0.179600', 'Boxes Obscure Spheres~0.266198', 'Object Limits~0.257177', 'Not Bird~0.260197', 'Angled Silhouette~0.147673',
'Warm Up~0.183971','Tetromino~0.226869', 'Stranger Shapes~0.283971', 'Sugar Cones~0.085909', 'Tall and Small~0.266869', 'Ramp Up and Can It~0.206271', 'More Than Meets Your Eye~0.192319', 'Unnecessary~0.76', 'Zzz~0.234035', 'Bull Market~0.358579', 'Few Clues~0.324041', 'Orange Dance~0.647731', 'Bear Market~1.000000']
typeMapping = ['Sandbox~SAND', '1. One Box~Tutorial', '2. Separated Boxes~Tutorial', '3. Rotate a Pyramid~Tutorial', '4. Match Silhouettes~Tutorial', '5. Removing Objects~Tutorial', '6. Stretch a Ramp~Tutorial', '7. Max 2 Boxes~Tutorial', '8. Combine 2 Ramps~Tutorial', '9. Scaling Round Objects~Tutorial',
'Square Cross-Sections~Easy Puzzles', 'Bird Fez~Easy Puzzles', 'Pi Henge~Easy Puzzles', '45-Degree Rotations~Easy Puzzles', 'Pyramids are Strange~Easy Puzzles', 'Boxes Obscure Spheres~Easy Puzzles', 'Object Limits~Easy Puzzles', 'Not Bird~Easy Puzzles', 'Angled Silhouette~Easy Puzzles',
'Warm Up~Hard Puzzles','Tetromino~Hard Puzzles', 'Stranger Shapes~Hard Puzzles', 'Sugar Cones~Hard Puzzles', 'Tall and Small~Hard Puzzles', 'Ramp Up and Can It~Hard Puzzles', 'More Than Meets Your Eye~Hard Puzzles', 'Unnecessary~Hard Puzzles', 'Zzz~Hard Puzzles', 'Bull Market~Hard Puzzles', 'Few Clues~Hard Puzzles', 'Orange Dance~Hard Puzzles', 'Bear Market~Hard Puzzles']
difficultyPuzzles = dict()
for puzzle in difficultyMapping:
desc = puzzle.split("~")
difficultyPuzzles[desc[0]] = float(desc[1])
tutorialPuzzles = []
for puzzle in typeMapping:
desc = puzzle.split("~")
if(desc[1] == 'Tutorial'):
tutorialPuzzles.append(desc[0])
advancedPuzzles = []
for puzzle in typeMapping:
desc = puzzle.split("~")
if(desc[1] == 'Hard Puzzles'):
advancedPuzzles.append(desc[0])
intermediatePuzzles = []
for puzzle in typeMapping:
desc = puzzle.split("~")
if(desc[1] == 'Easy Puzzles'):
intermediatePuzzles.append(desc[0])
allPuzzles = []
for puzzle in typeMapping:
desc = puzzle.split("~")
allPuzzles.append(desc[0])
@app.task
def process_task(task, *args):
# this fixes an extraneous comma in tuples
if len(args) == 1:
args = args[0]
# and this fixes when there are no args
if len(args) == 0:
task_sig = task.s()
else:
task_sig = task.s(args)
taskname = str(task_sig)
print("processing task", taskname)
task_db, created = Task.objects.get_or_create(signature=taskname)
task_db.state = "starting"
task_db.save(update_fields=['state'])
try:
result = task_sig.apply_async()
task_db.state = "processing"
task_db.save(update_fields=['state'])
task_db.result = result.get()
task_db.state = "done"
task_db.time_ended = timezone.now()
task_db.errors = ""
except Exception as exc:
tb = traceback.format_exception(etype=type(exc), value=exc, tb=exc.__traceback__)
task_db.errors = tb
task_db.state = "error"
print(tb)
task_db.save()
return task_db
# of particular interest
all_data_collection_urls = ['ginnymason', 'chadsalyer', 'kristinknowlton', '<NAME>', 'leja', 'leja2', 'debbiepoull',
'juliamorgan']
@app.task
def computeFunnelByPuzzle(group='all'):
if group == 'all':
toFilter = all_data_collection_urls
else:
toFilter = group
urls = URL.objects.filter(name__in=toFilter)
sessions = CustomSession.objects.filter(url__in=urls)
qs = Event.objects.filter(session__in=sessions)
dataEvents = read_frame(qs)
dataEvents['time'] = pd.to_datetime(dataEvents['time'])
dataEvents = dataEvents.sort_values('time')
dataEvents['group'] = [json.loads(x)['group'] if 'group' in json.loads(x).keys() else '' for x in
dataEvents['data']]
dataEvents['user'] = [json.loads(x)['user'] if 'user' in json.loads(x).keys() else '' for x in dataEvents['data']]
# removing those rows where we dont have a group and a user that is not guest
dataEvents = dataEvents[
((dataEvents['group'] != '') & (dataEvents['user'] != '') & (dataEvents['user'] != 'guest'))]
dataEvents['group_user_id'] = dataEvents['group'] + '~' + dataEvents['user']
# userFunnelDict key: (group~user~puzzle), json values: started, create_shape, submitted, completed
userFunnelDict = dict()
for user in dataEvents['group_user_id'].unique():
user_events = dataEvents[dataEvents['group_user_id'] == user]
user_puzzle_key = None
for enum, event in user_events.iterrows():
if (event['type'] in ['ws-start_level', 'ws-puzzle_started']):
user_puzzle_key = event['group'] + '~' + event['user'] + '~' + json.loads(event['data'])['task_id']
if (user_puzzle_key not in userFunnelDict.keys()):
userFunnelDict[user_puzzle_key] = json.loads(
'{"started": 0, "create_shape": 0, "submitted": 0, "completed": 0}')
if (event['type'] == 'ws-puzzle_started'):
userFunnelDict[user_puzzle_key]['started'] += 1
elif (event['type'] == 'ws-create_shape'):
userFunnelDict[user_puzzle_key]['create_shape'] += 1
elif (event['type'] == 'ws-check_solution'):
userFunnelDict[user_puzzle_key]['submitted'] += 1
elif (event['type'] == 'ws-puzzle_complete'):
userFunnelDict[user_puzzle_key]['completed'] += 1
userFunnelList = []
for key in userFunnelDict.keys():
key_split = key.split('~')
userFunnelList.append([key_split[0], key_split[1], key_split[2], userFunnelDict[key]])
userFunnelByPuzzle = pd.DataFrame(userFunnelList, columns=['group', 'user', 'task_id', 'funnel'])
return userFunnelByPuzzle.to_json()
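# Editorial note (sketch of the result): each row of the DataFrame serialised
# above is one (group, user, task_id) triple with a funnel dict such as
#   {"started": 1, "create_shape": 4, "submitted": 2, "completed": 1}
# counting how far that user progressed on that puzzle.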
@app.task
def sequenceBetweenPuzzles(group='all'):
if group == 'all' :
toFilter = all_data_collection_urls
else:
toFilter = group
urls = URL.objects.filter(name__in=toFilter)
sessions = CustomSession.objects.filter(url__in=urls)
qs = Event.objects.filter(session__in=sessions)
dataEvents = read_frame(qs)
dataEvents['group'] = [json.loads(x)['group'] if 'group' in json.loads(x).keys() else '' for x in dataEvents['data']]
dataEvents['user'] = [json.loads(x)['user'] if 'user' in json.loads(x).keys() else '' for x in dataEvents['data']]
# removing those rows where we dont have a group and a user that is not guest
dataEvents = dataEvents[((dataEvents['group'] != '') & (dataEvents['user'] != '') & (dataEvents['user'] != 'guest'))]
dataEvents['group_user_id'] = dataEvents['group'] + '~' + dataEvents['user']
# filtering to only take the group passed as argument
if(group != 'all'):
dataEvents = dataEvents[dataEvents['group'].isin(group)]
# Data Cleaning
dataEvents['time'] = pd.to_datetime(dataEvents['time'])
dataEvents = dataEvents.sort_values('time')
userPuzzleDict = {}
for user in dataEvents['group_user_id'].unique():
#Select rows
user_events = dataEvents[dataEvents['group_user_id'] == user]
user_events_na_dropped = user_events.dropna()
for enum, event in user_events_na_dropped.iterrows():
user_key = event['user']
if(user_key not in userPuzzleDict.keys()):
userPuzzleDict[user_key] = {}
numPuzzles = 1
if(event['type'] == 'ws-start_level'):
activePuzzle = json.loads(event['data'])['task_id']
if (activePuzzle == 'Sandbox'):
continue
secondKey = str(numPuzzles) + '~' + event['session']
if (userPuzzleDict[user_key].get(secondKey) == None):
userPuzzleDict[user_key][secondKey] = dict()
if (userPuzzleDict[user_key][secondKey].get(activePuzzle) == None):
userPuzzleDict[user_key][secondKey] = {activePuzzle : 'started'}
elif(event['type'] == 'ws-puzzle_started'):
if (activePuzzle == 'Sandbox' or userPuzzleDict[user_key][secondKey][activePuzzle] in ['started','shape_created', 'submitted', 'completed']):
continue
userPuzzleDict[user_key][secondKey] = {activePuzzle : 'started'}
elif(event['type'] == 'ws-create_shape'):
if (activePuzzle == 'Sandbox' or userPuzzleDict[user_key][secondKey][activePuzzle] in ['shape_created', 'submitted', 'completed']):
continue
userPuzzleDict[user_key][secondKey] = {activePuzzle : 'shape_created'}
elif(event['type'] == 'ws-check_solution'):
if (activePuzzle == 'Sandbox' or userPuzzleDict[user_key][secondKey][activePuzzle] in ['submitted', 'completed']):
continue
userPuzzleDict[user_key][secondKey] = {activePuzzle :'submitted'}
elif(event['type'] == 'ws-puzzle_complete'):
if (activePuzzle == 'Sandbox'):
continue
userPuzzleDict[user_key][secondKey] = {activePuzzle :'completed'}
elif(event['type'] in ['ws-exit_to_menu', 'ws-disconnect', 'ws-login_user']):
if (activePuzzle == 'Sandbox'):
continue
numPuzzles +=1
userSessionList = []
for key in userPuzzleDict.keys():
for sequence in userPuzzleDict[key].keys():
key_split = sequence.split('~')
userSessionList.append([key, key_split[1], int(key_split[0]), userPuzzleDict[key][sequence]])
userSequence = pd.DataFrame(userSessionList, columns=['user', 'session', 'sequence', 'task_id'])
#Recalculate sequence
mod = []
for user in userSequence['user'].unique():
previousAttempt = 1
n_attempt = 1
individualDf = userSequence[userSequence['user'] == user]
for enum, event in individualDf.iterrows():
if (event['sequence'] != previousAttempt):
n_attempt += 1
previousAttempt = event['sequence']
event['sequence'] = n_attempt
mod.append(event)
modDf = pd.DataFrame(mod, columns=['user', 'session', 'sequence', 'task_id'])
return modDf.to_json()
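# Editorial note (sketch of the result): each row of modDf holds one puzzle
# visit: user, session id, the re-numbered attempt sequence, and a dict such as
# {"Bird Fez": "completed"} giving the furthest state reached in that visit.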
@app.task
def sequenceWithinPuzzles(group='all'):
if group == 'all':
toFilter = all_data_collection_urls
else:
toFilter = group
urls = URL.objects.filter(name__in=toFilter)
sessions = CustomSession.objects.filter(url__in=urls)
qs = Event.objects.filter(session__in=sessions)
dataEvents = read_frame(qs)
tutorialList = ['1. One Box', '2. Separated Boxes', '3. Rotate a Pyramid', '4. Match Silhouettes',
'5. Removing Objects', '6. Stretch a Ramp', '7. Max 2 Boxes', '8. Combine 2 Ramps',
'9. Scaling Round Objects', 'Sandbox']
# Remove SandBox and tutorial levels.
dataEvents['group'] = [json.loads(x)['group'] if 'group' in json.loads(x).keys() else '' for x in
dataEvents['data']]
dataEvents['user'] = [json.loads(x)['user'] if 'user' in json.loads(x).keys() else '' for x in dataEvents['data']]
# keep only rows that have a group and a user, excluding 'guest' users
dataEvents = dataEvents[
((dataEvents['group'] != '') & (dataEvents['user'] != '') & (dataEvents['user'] != 'guest'))]
dataEvents['group_user_id'] = dataEvents['group'] + '~' + dataEvents['user']
# filtering to only take the group passed as argument
if (group != 'all'):
dataEvents = dataEvents[dataEvents['group'].isin(group)]
# Data Cleaning
dataEvents['time'] = pd.to_datetime(dataEvents['time'])
dataEvents = dataEvents.sort_values('time')
newDataEvents = []
# Select puzzle and actions
notSelectedEvents = ['ws-mode_change', 'ws-click_nothing', 'ws-click_disabled', 'ws-select_shape',
'ws-deselect_shape', 'ws-paint', 'ws-palette_change', 'ws-toggle_paint_display',
'ws-toggle_snapshot_display', 'ws-create_user', 'ws-redo_action', 'ws-undo_action',
'ws-restart_puzzle', 'ws-puzzle_started']
# Selected puzzles
selectedPuzzles = ['Square Cross-Sections', 'Bird Fez', 'Pi Henge', '45-Degree Rotations', 'Pyramids are Strange',
'Boxes Obscure Spheres', 'Object Limits', 'Warm Up', 'Angled Silhouette',
'Sugar Cones', 'Stranger Shapes', 'Tall and Small', 'Ramp Up and Can It',
'More Than Meets Your Eye', 'Not Bird', 'Unnecesary', 'Zzz', 'Bull Market', 'Few Clues',
'Orange Dance', 'Bear Market']
eventsWithMetaData = ['ws-create_shape', 'ws-delete_shape', 'ws-rotate_shape', 'ws-scale_shape', 'ws-move_shape']
for user in dataEvents['group_user_id'].unique():
# Select rows
user_events = dataEvents[dataEvents['group_user_id'] == user]
user_events_na_dropped = user_events.dropna()
activePuzzle = None
nAttempt = 1
prevCheck = False
prevEvent = None
figureDict = dict()
for enum, event in user_events_na_dropped.iterrows():
# Ignore event
if (prevCheck == True):
if (event['type'] == 'ws-puzzle_complete'):
prevEvent['metadata']['correct'] = True
newDataEvents.append(prevEvent)
prevCheck = False
prevEvent = None
continue
else:
prevEvent['metadata']['correct'] = False
newDataEvents.append(prevEvent)
prevCheck = False
prevEvent = None
if (event['type'] in notSelectedEvents):
continue
elif (event['type'] == 'ws-start_level'):
activePuzzle = json.loads(event['data'])['task_id']
event['task_id'] = activePuzzle
elif (event['type'] == 'ws-create_shape'):
event['task_id'] = activePuzzle
if (event['task_id'] in selectedPuzzles):
event['n_attempt'] = nAttempt
shape_id = json.loads(event['data'])['objSerialization']
shape_type = json.loads(event['data'])['shapeType']
figureDict[shape_id] = shape_type
event['metadata'] = dict()
event['metadata']['shape_id'] = shape_id
event['metadata']['shape_type'] = shape_type
newDataEvents.append(event)
elif (event['type'] == 'ws-delete_shape' or event['type'] == 'ws-move_shape'):
event['task_id'] = activePuzzle
if (event['task_id'] in selectedPuzzles):
event['n_attempt'] = nAttempt
if (event['type'] == 'ws-delete_shape'):
idList = json.loads(event['data'])['deletedShapes']
elif (event['type'] == 'ws-move_shape'):
idList = json.loads(event['data'])['selectedObjects']
for shapeId in idList:
shape_id = shapeId
shape_type = figureDict[shape_id]
event['metadata'] = dict()
event['metadata']['shape_id'] = shape_id
event['metadata']['shape_type'] = shape_type
newDataEvents.append(event)
elif (event['type'] == 'ws-rotate_shape' or event['type'] == 'ws-scale_shape'):
event['task_id'] = activePuzzle
if (event['task_id'] in selectedPuzzles):
event['n_attempt'] = nAttempt
shape_id = json.loads(event['data'])['selectedObject']
shape_type = figureDict[shape_id]
event['metadata'] = dict()
event['metadata']['shape_id'] = shape_id
event['metadata']['shape_type'] = shape_type
newDataEvents.append(event)
elif ((event['type'] in ['ws-exit_to_menu', 'ws-login_user']) and (activePuzzle in selectedPuzzles)):
figureDict.clear()
nAttempt += 1
else:
event['task_id'] = activePuzzle
if (event['task_id'] in selectedPuzzles):
event['n_attempt'] = nAttempt
event['metadata'] = dict()
if (event['type'] == 'ws-check_solution'):
prevCheck = True
prevEvent = event
else:
newDataEvents.append(event)
taskDf = pd.DataFrame(newDataEvents,
columns=['id', 'time', 'group_user_id', 'task_id', 'n_attempt', 'type', 'metadata'])
data = taskDf
listEvent = ['ws-rotate_view', 'ws-rotate_shape', 'ws-undo_action', 'ws-move_shape', 'ws-snapshot',
'ws-scale_shape']
dataConvert2 = []
for user in data['group_user_id'].unique():
individualDf = data[data['group_user_id'] == user]
# Current action set
currentAction = []
# String with action types
actionString = ""
actualEvent = 'None'
for enum, event in individualDf.iterrows():
key = event['group_user_id']
key_split = key.split('~')
event['group_id'] = key_split[0]
event['user'] = key_split[1]
actualEvent = event['type']
eq = True
for a in currentAction:
if (a['type'] != actualEvent):
# Check whether we can compact this run
eq = False
if (eq == False):
igual = True
prev = ""
for a2 in currentAction:
if (a2['type'] != prev):
if (prev == ""):
igual = True
else:
igual = False
prev = a2['type']
if ((igual == True) and (prev in listEvent)):
add = currentAction[0]
# add['type'] = add['type'] + 'x' + str(len(currentAction))
add['n_times'] = len(currentAction)
dataConvert2.append(add)
currentAction.clear()
currentAction.append(event)
else: # igual != True
for a in currentAction:
a['n_times'] = 1
dataConvert2.append(a)
currentAction.clear()
currentAction.append(event)
else: # eq = True
if (event['type'] not in listEvent):
currentAction.append(event)
for a in currentAction:
a['n_times'] = 1
dataConvert2.append(a)
currentAction.clear()
else:
if (len(currentAction) > 0):
if (currentAction[0]['type'] in eventsWithMetaData):
# Event with metadata, check if it is the same shape_id
if (currentAction[0]['metadata']['shape_id'] == event['metadata']['shape_id']):
currentAction.append(event)
else:
add = currentAction[0]
# add['type'] = add['type'] + 'x' + str(len(currentAction))
add['n_times'] = len(currentAction)
dataConvert2.append(add)
currentAction.clear()
currentAction.append(event)
# Event without metaData, just concatenate.
else:
currentAction.append(event)
elif (len(currentAction) == 0):
currentAction.append(event)
# Add the last elements.
# We must check whether the last elements can also be compacted.
final = ""
if (len(currentAction) > 0):
igual2 = True
prev = ""
for a2 in currentAction:
if (a2['type'] != prev):
if (prev == ""):
igual2 = True
else:
igual2 = False
prev = a2['type']
if ((igual2 == True) and (prev in listEvent)):
add = currentAction[0]
# add['type'] = add['type'] + 'x' + str(len(currentAction))
add['n_times'] = len(currentAction)
dataConvert2.append(add)
currentAction.clear()
currentAction.append(event)
else: # igual2 != True
for a in currentAction:
a['n_times'] = 1
dataConvert2.append(a)
currentAction.clear()
currentAction.append(event)
# Create dataframe from list
# consecutiveDf = pd.DataFrame(dataConvert2, columns=['id', 'time', 'group_user_id', 'task_id', 'n_attempt', 'type', 'metadata'])
data = pd.DataFrame(dataConvert2,
columns=['group_id', 'user', 'task_id', 'n_attempt', 'type', 'n_times', 'metadata'])
# Recalculate n_attempt
mod = []
for user in data['user'].unique():
previousAttempt = 1
n_attempt = 1
individualDf = data[data['user'] == user]
for enum, event in individualDf.iterrows():
if (event['n_attempt'] != previousAttempt):
n_attempt += 1
previousAttempt = event['n_attempt']
event['n_attempt'] = n_attempt
mod.append(event)
modDf = pd.DataFrame(mod, columns=['group_id', 'user', 'task_id', 'n_attempt', 'type', 'n_times', 'metadata'])
return modDf.to_json()
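# --- Illustrative sketch (hypothetical helper, not part of the original task) ---
# The compaction loop above collapses consecutive repetitions of the event
# types in listEvent (rotate/scale/move/...) into one row with an 'n_times'
# counter. The core run-length idea, stripped of the DataFrame bookkeeping:
def _example_compact_consecutive_events(event_types, repeatable):
    """Collapse consecutive duplicates of repeatable events into (type, count) pairs."""
    compacted = []
    for etype in event_types:
        if compacted and compacted[-1][0] == etype and etype in repeatable:
            compacted[-1] = (etype, compacted[-1][1] + 1)
        else:
            compacted.append((etype, 1))
    return compacted
# e.g. _example_compact_consecutive_events(
#          ['ws-rotate_view', 'ws-rotate_view', 'ws-check_solution'],
#          {'ws-rotate_view'})
#      -> [('ws-rotate_view', 2), ('ws-check_solution', 1)]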
@app.task
def computeLevelsOfActivity(group='all'):
### DATA COLLECTION AND INITIAL PROCESSING
if group == 'all':
toFilter = all_data_collection_urls
else:
toFilter = group
urls = URL.objects.filter(name__in=toFilter)
sessions = CustomSession.objects.filter(url__in=urls)
qs = Event.objects.filter(session__in=sessions)
dataEvents = read_frame(qs)
dataEvents['time'] = | pd.to_datetime(dataEvents['time']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import pytest
import re
import tubular
import tubular.testing.helpers as h
import tubular.testing.test_data as data_generators_p
import input_checker
from input_checker._version import __version__
from input_checker.checker import InputChecker
from input_checker.exceptions import InputCheckerError
class TestInit(object):
"""Tests for InputChecker.init()."""
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {0: {"args": (), "kwargs": {"columns": ["a", "b"]}}}
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
InputChecker(columns=["a", "b"])
def test_inheritance(self):
"""Test that InputChecker inherits from tubular.base.BaseTransformer."""
x = InputChecker()
h.assert_inheritance(x, tubular.base.BaseTransformer)
def test_arguments(self):
"""Test that InputChecker init has expected arguments."""
h.test_function_arguments(
func=InputChecker.__init__,
expected_arguments=[
"self",
"columns",
"categorical_columns",
"numerical_columns",
"datetime_columns",
"skip_infer_columns",
],
expected_default_values=(None, None, None, None, None),
)
def test_version_attribute(self):
"""Test that __version__ attribute takes expected value."""
x = InputChecker(columns=["a"])
h.assert_equal_dispatch(
expected=__version__,
actual=x.version_,
msg="__version__ attribute",
)
def test_columns_attributes_generated(self):
"""Test all columns attributes are saved with InputChecker init"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert hasattr(x, "columns") is True, "columns attribute not present after init"
assert (
hasattr(x, "numerical_columns") is True
), "numerical_columns attribute not present after init"
assert (
hasattr(x, "categorical_columns") is True
), "categorical_columns attribute not present after init"
assert (
hasattr(x, "datetime_columns") is True
), "datetime_columns attribute not present after init"
assert (
hasattr(x, "skip_infer_columns") is True
), "skip_infer_columns attribute not present after init"
def test_check_type_called(self, mocker):
"""Test all check type is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_check_type")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._check_type with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (
x,
["a", "b", "c", "d"],
"input columns",
[list, type(None), str],
)
expected_pos_args_1 = (
x,
["b"],
"categorical columns",
[list, str, type(None)],
)
expected_pos_args_2 = (
x,
["a"],
"numerical columns",
[list, dict, str, type(None)],
)
expected_pos_args_3 = (
x,
["d"],
"datetime columns",
[list, dict, str, type(None)],
)
expected_pos_args_4 = (
x,
["c"],
"skip infer columns",
[list, type(None)],
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _check_type call for columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _check_type call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _check_type call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _check_type call for datetime columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _check_type call for skip infer columns argument"
def test_check_is_string_value_called(self, mocker):
"""Test all check string is called by the init method when option set to infer."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_string_value")
x = InputChecker(
numerical_columns="infer",
categorical_columns="infer",
datetime_columns="infer",
)
assert (
spy.call_count == 3
), "unexpected number of calls to InputChecker._is_string_value with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
expected_pos_args_0 = (x, x.categorical_columns, "categorical columns", "infer")
expected_pos_args_1 = (x, x.numerical_columns, "numerical columns", "infer")
expected_pos_args_2 = (x, x.datetime_columns, "datetime columns", "infer")
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_string_value call for numerical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
def test_check_is_empty_called(self, mocker):
"""Test all check is empty is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_empty")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 4
), "unexpected number of calls to InputChecker._is_empty with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
expected_pos_args_0 = (x, "input columns", ["a", "b", "c", "d"])
expected_pos_args_1 = (x, "categorical columns", ["b", "c"])
expected_pos_args_2 = (x, "numerical columns", ["a"])
expected_pos_args_3 = (x, "datetime columns", ["d"])
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_empty call for categorical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
def test_check_is_listed_in_columns_called(self, mocker):
spy = mocker.spy(input_checker.checker.InputChecker, "_is_listed_in_columns")
InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._is_listed_in_columns with init"
class TestConsolidateInputs(object):
def test_arguments(self):
"""Test that _consolidate_inputs has expected arguments."""
h.test_function_arguments(
func=InputChecker._consolidate_inputs,
expected_arguments=["self", "X"],
expected_default_values=None,
)
def test_infer_datetime_columns(self):
"""Test that _consolidate_inputs infers the correct datetime columns"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert x.datetime_columns == [
"d",
"e",
], "infer datetime not finding correct columns"
def test_infer_datetime_dict(self):
"""Test that _consolidate_inputs infers the correct datetime dict"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.datetime_dict["d"]["maximum"] is False
), "infer numerical not specifying maximum value check as true"
assert (
x.datetime_dict["d"]["minimum"] is True
), "infer numerical not specifying maximum value check as true"
def test_infer_categorical_columns(self):
"""Test that _consolidate_inputs infers the correct categorical columns"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x.fit(df)
assert x.categorical_columns == [
"b",
"c",
"d",
], "infer categorical not finding correct columns"
def test_infer_numerical_columns(self):
"""Test that _consolidate_inputs infers the correct numerical columns"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert x.numerical_columns == [
"a"
], "infer numerical not finding correct columns"
def test_infer_numerical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring numerical"""
x = InputChecker(numerical_columns="infer", skip_infer_columns=["a"])
df = data_generators_p.create_df_2()
df["d"] = df["a"]
x.fit(df)
assert x.numerical_columns == [
"d"
], "infer numerical not finding correct columns when skipping infer columns"
def test_infer_categorical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring categorical"""
x = InputChecker(categorical_columns="infer", skip_infer_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
assert x.categorical_columns == [
"c"
], "infer categorical not finding correct columns when skipping infer columns"
def test_infer_datetime_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring datetime"""
x = InputChecker(datetime_columns="infer", skip_infer_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["a"] = df["d"]
x.fit(df)
assert x.datetime_columns == [
"a"
], "infer datetime not finding correct columns when skipping infer columns"
def test_infer_numerical_dict(self):
"""Test that _consolidate_inputs infers the correct numerical dict"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
x.numerical_dict["a"]["maximum"] is True
), "infer numerical not specifying maximum value check as true"
assert (
x.numerical_dict["a"]["minimum"] is True
), "infer numerical not specifying minimum value check as true"
def test_datetime_type(self):
"""Test that datetime columns is a list after calling _consolidate_inputs"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
type(x.datetime_columns) is list
), f"incorrect datetime_columns type returned from _consolidate_inputs - expected: list but got: {type(x.datetime_columns)} "
def test_categorical_type(self):
"""Test that categorical columns is a list after calling _consolidate_inputs"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.categorical_columns) is list
), f"incorrect categorical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.categorical_columns)} "
def test_numerical_type(self):
"""Test that numerical columns and dict are a list and dict after calling _consolidate_inputs"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.numerical_columns) is list
), f"incorrect numerical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.numerical_columns)} "
assert (
type(x.numerical_dict) is dict
), f"incorrect numerical_dict type returned from _consolidate_inputs - expected: dict but got: {type(x.numerical_dict)} "
def test_check_is_subset_called(self, mocker):
"""Test all check _is_subset is called by the _consolidate_inputs method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["c"],
datetime_columns=["d"],
skip_infer_columns=["b"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
spy = mocker.spy(input_checker.checker.InputChecker, "_is_subset")
x.fit(df)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._is_subset with _consolidate_inputs"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (x, "skip infer columns", ["b"], df)
expected_pos_args_1 = (x, "input columns", ["a", "b", "c", "d"], df)
expected_pos_args_2 = (x, "categorical columns", ["c"], df)
expected_pos_args_3 = (x, "numerical columns", ["a"], df)
expected_pos_args_4 = (x, "datetime columns", ["d"], df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_subset call for skip_infer_columns columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_subset call for input columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_subset call for categorical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_subset call for numerical columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _is_subset call for datetime columns argument"
class TestFitTypeChecker(object):
"""Tests for InputChecker._fit_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_type_checker, expected_arguments=["self", "X"]
)
def test_no_column_classes_before_fit(self):
"""Test column_classes is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "column_classes") is False
), "column_classes attribute present before fit"
def test_column_classes_after_fit(self):
"""Test column_classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(
x, "column_classes"
), "column_classes attribute not present after fit"
def test_correct_columns_classes(self):
"""Test fit type checker saves types for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.column_classes.keys()) == [
"a"
], f"incorrect values returned from _fit_value_checker - expected: ['a'] but got: {list(x.column_classes.keys())}"
def test_correct_classes_identified(self):
"""Test fit type checker identifies correct classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.column_classes["a"] == "float64"
), f"incorrect type returned from _fit_type_checker for column 'a' - expected: float64 but got: {x.column_classes['a']}"
assert (
x.column_classes["b"] == "object"
), f"incorrect type returned from _fit_type_checker for column 'b' - expected: object but got: {x.column_classes['b']}"
assert (
x.column_classes["c"] == "category"
), f"incorrect type returned from _fit_type_checker for column 'c' - expected: category but got: {x.column_classes['c']}"
assert (
x.column_classes["d"] == "datetime64[ns]"
), f"incorrect type returned from _fit_type_checker for column 'd' - expected: datetime64[ns] but got: {x.column_classes['d']}"
class TestFitNullChecker(object):
"""Tests for InputChecker._fit_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_null_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test null_map is not present before fit called"""
x = InputChecker()
assert hasattr(x, "null_map") is False, "null_map attribute present before fit"
def test_expected_values_after_fit(self):
"""Test null_map is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(x, "null_map"), "null_map attribute not present after fit"
def test_correct_columns_nulls(self):
"""Test fit nulls checker saves map for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.null_map.keys()) == [
"a"
], f"incorrect values returned from _fit_null_checker - expected: ['a'] but got: {list(x.null_map.keys())}"
def test_correct_classes_identified(self):
"""Test fit null checker identifies correct columns with nulls after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["b"] = df["b"].fillna("a")
x.fit(df)
assert (
x.null_map["a"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['a']}"
assert (
x.null_map["b"] == 0
), f"incorrect values returned from _fit_null_checker - expected: 0 but got: {x.null_map['b']}"
assert (
x.null_map["c"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['c']}"
class TestFitValueChecker(object):
"""Tests for InputChecker._fit_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_value_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(categorical_columns=["b", "c"])
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test expected_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert hasattr(
x, "expected_values"
), "expected_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit value checker saves levels for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert list(x.expected_values.keys()) == [
"b",
"c",
], f"incorrect values returned from _fit_value_checker - expected: ['b', 'c'] but got: {list(x.expected_values.keys())}"
def test_correct_values_identified(self):
"""Test fit value checker identifies corrcet levels after fit called"""
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x = InputChecker(categorical_columns=["b", "c", "d"])
x.fit(df)
assert x.expected_values["b"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['b']}"
assert x.expected_values["c"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['c']}"
assert x.expected_values["d"] == [
True,
False,
], f"incorrect values returned from _fit_value_checker - expected: [True, False, np.nan] but got: {x.expected_values['d']}"
class TestFitNumericalChecker(object):
"""Tests for InputChecker._fit_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_numerical_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test numerical_values is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test numerical_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert hasattr(
x, "numerical_values"
), "numerical_values attribute not present after fit"
def test_correct_columns_num_values(self):
"""Test fit numerical checker saves values for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert list(x.numerical_values.keys()) == [
"a"
], f"incorrect values returned from numerical_values - expected: ['a'] but got: {list(x.numerical_values.keys())}"
def test_correct_numerical_values_identified(self):
"""Test fit numerical checker identifies correct range values after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] == 1
), f"incorrect values returned from _fit_numerical_checker - expected: 0 but got: {x.numerical_values['a']['minimum']}"
def test_correct_numerical_values_identified_dict(self):
"""Test fit numerical checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] is None
), f"incorrect values returned from _fit_numerical_checker - expected: None but got: {x.numerical_values['a']['minimum']}"
class TestFitDatetimeChecker(object):
"""Tests for InputChecker._fit_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_datetime_checker, expected_arguments=["self", "X"]
)
def test_no_datetime_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(datetime_columns=["b", "c"])
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present before fit"
def test_datetime_values_after_fit(self):
"""Test datetime_values is present after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert hasattr(
x, "datetime_values"
), "datetime_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit datetime checker saves minimum dates for correct columns after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert list(x.datetime_values.keys()) == [
"d",
"e",
], f"incorrect values returned from _fit_datetime_checker - expected: ['d', 'e'] but got: {list(x.datetime_values.keys())} "
def test_correct_datetime_values_identified(self):
"""Test fit datetime checker identifies correct minimum bound after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d is None
), f"incorrect values returned from _fit_datetime_checker - expected: None, but got: {actual_max_d}"
def test_correct_datetime_values_identified_dict(self):
"""Test fit datetime checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
expected_max_d = pd.to_datetime("01/02/2021").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d == expected_max_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_max_d}, but got: {actual_max_d}"
class TestFit(object):
"""Tests for InputChecker.fit()."""
def test_arguments(self):
"""Test that InputChecker fit has expected arguments."""
h.test_function_arguments(
func=InputChecker.fit,
expected_arguments=["self", "X", "y"],
expected_default_values=(None,),
)
def test_super_fit_called(self, mocker):
"""Test that BaseTransformer fit called."""
expected_call_args = {
0: {"args": (data_generators_p.create_df_2(), None), "kwargs": {}}
}
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "fit", expected_call_args
):
x.fit(df)
def test_all_columns_selected(self):
"""Test fit selects all columns when columns parameter set to None"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=None)
assert (
x.columns is None
), f"incorrect columns attribute before fit when columns parameter set to None - expected: None but got: {x.columns}"
x.fit(df)
assert x.columns == [
"a",
"b",
"c",
], f"incorrect columns identified when columns parameter set to None - expected: ['a', 'b', 'c'] but got: {x.columns}"
def test_fit_returns_self(self):
"""Test fit returns self?"""
df = data_generators_p.create_df_2()
x = InputChecker()
x_fitted = x.fit(df)
assert x_fitted is x, "Returned value from InputChecker.fit not as expected."
def test_no_optional_calls_fit(self):
"""Test numerical_values and expected_values is not present after fit if parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present with numerical_columns set to None"
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present with categorical_columns set to None"
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present with datetime_columns set to None"
def test_compulsory_checks_generated_with_no_optional_calls_fit(self):
"""Test null_map and column_classes are present after fit when optional parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present when optional checks set to None"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present when optional checks set to None"
def test_all_checks_generated(self):
"""Test all checks are generated when all optional parameters set"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
hasattr(x, "numerical_values") is True
), "numerical_values attribute not present after fit with numerical_columns set"
assert (
hasattr(x, "expected_values") is True
), "expected_values attribute not present after fit with categorical_columns set"
assert (
hasattr(x, "datetime_values") is True
), "expected_values attribute not present after fit with datetime_columns set"
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present after fit"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present after fit"
def test_check_df_is_empty_called(self, mocker):
"""Test check is df empty is called by the fit method."""
x = InputChecker(
columns=["a", "b", "c"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
)
df = data_generators_p.create_df_2()
spy = mocker.spy(input_checker.checker.InputChecker, "_df_is_empty")
x.fit(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._df_is_empty with fit"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
expected_pos_args_0 = (x, "input dataframe", df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _df_is_empty call for dataframe argument"
class TestTransformTypeChecker(object):
"""Tests for InputChecker._transform_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_type_checker,
expected_arguments=["self", "X", "batch_mode"],
expected_default_values=(False,),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["column_classes"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_type_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_type_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert isinstance(
type_checker_failed_checks, dict
), f"incorrect type results type identified - expected: dict but got: {type(type_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_passes_column_all_nulls(self):
"""Test _transform_type_checker passes all the checks on the training dataframe when a column contains only nulls"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df["c"] = np.nan
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_type_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
df.loc[5, "a"] = "a"
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks["a"]["actual"] == df["a"].dtypes
), f"incorrect values saved to type_checker_failed_checks bad types - expected: [{type('a')}] but got: {type_checker_failed_checks['a']['types']}"
assert (
type_checker_failed_checks["a"]["expected"] == exp_type
), f"incorrect values saved to type_checker_failed_checks expected types - expected: [{exp_type}] but got: {type_checker_failed_checks['a']['types']}"
def test_transform_passes_batch_mode(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test_batch_mode(self):
"""Test _transform_type_checker handles mixed types"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
print(df)
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
print(exp_type)
df.loc[5, "a"] = "a"
df.loc[1, "d"] = "a"
df.loc[3, "b"] = 1
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
expected_output = {
"a": {"idxs": [5], "actual": {5: "str"}, "expected": "float"},
"b": {"idxs": [3], "actual": {3: "int"}, "expected": "str"},
"d": {"idxs": [1], "actual": {1: "str"}, "expected": "Timestamp"},
}
for k, v in expected_output.items():
assert (
k in type_checker_failed_checks.keys()
), f"expected column {k} in type_checker_failed_checks output"
assert (
type(type_checker_failed_checks[k]) == dict
), f"expected dict for column {k} in type_checker_failed_checks output"
for sub_k, sub_v in expected_output[k].items():
assert (
sub_k in type_checker_failed_checks[k].keys()
), f"expected {sub_k} as dict key in type_checker_failed_checks output"
assert (
sub_v == type_checker_failed_checks[k][sub_k]
), f"expected {sub_v} as value for {sub_k} in column {k} output of type_checker_failed_checks output"
class TestTransformNullChecker(object):
"""Tests for InputChecker._transform_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_null_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["null_map"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_null_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_null_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert isinstance(
null_checker_failed_checks, dict
), f"incorrect null results type identified - expected: dict but got: {type(null_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_null_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert (
null_checker_failed_checks == {}
), f"Null checker found failed tests - {list(null_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_null_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
df.loc[5, "b"] = np.nan
null_checker_failed_checks = x._transform_null_checker(df)
assert null_checker_failed_checks["b"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {null_checker_failed_checks['b']}"
class TestTransformNumericalChecker(object):
"""Tests for InputChecker._transform_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_numerical_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["numerical_values"],), "kwargs": {}}}
x = InputChecker(numerical_columns=["a"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_numerical_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_numerical_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert isinstance(
numerical_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(numerical_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_numerical_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks == {}
), f"Numerical checker found failed tests - {list(numerical_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_numerical_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_max = {5: 7.0}
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
def test_transform_captures_failed_test_only_maximum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a maximum value but no minimum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
expected_max = {5: 7.0}
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
"minimum" not in numerical_checker_failed_checks["a"]
), "No minimum value results expected given input the numerical dict"
def test_transform_captures_failed_test_only_minimum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a minimum value but no maximum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = False
numerical_dict["a"]["minimum"] = True
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
assert (
"maximum" not in numerical_checker_failed_checks["a"]
), "No maximum value results expected given input the numerical dict"
def test_transform_skips_failed_type_checks_batch_mode(self):
"""Test _transform_numerical_checker skips checks for rows which aren't numerical
when operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[4, "a"] = "z"
df.loc[1, "a"] = 1
df.loc[2, "a"] = 100
type_fails_dict = {
"a": {"idxs": [1, 4], "actual": {1: "int", 4: "str"}, "expected": "float"}
}
expected_output = {"a": {"max idxs": [2], "maximum": {2: 100}}}
numerical_checker_failed_checks = x._transform_numerical_checker(
df, type_fails_dict, batch_mode=True
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected=expected_output,
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
def test_transform_skips_failed_type_checks(self):
"""Test _transform_numerical_checker skips checks for columns which aren't numerical
when not operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
# Case 1: check will not be performed as column a is not numerical
df_test = pd.DataFrame({"a": ["z", "zz", "zzz"]})
type_fails_dict = {
"a": {"actual": df_test["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks = x._transform_numerical_checker(
df_test, type_fails_dict, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected={},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
# Case 2: column a should still be checked: even though the dtypes differ
# (int64 != float64), the column is still numerical
df_test2 = pd.DataFrame({"a": [5, 3, 222]})
type_fails_dict2 = {
"a": {"actual": df_test2["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks2 = x._transform_numerical_checker(
df_test2, type_fails_dict2, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks2,
expected={"a": {"max idxs": [2], "maximum": {2: 222}}},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
class TestTransformValueChecker(object):
"""Tests for InputChecker._transform_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_value_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["expected_values"],), "kwargs": {}}}
x = InputChecker(categorical_columns=["b", "c"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_value_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_value_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert isinstance(
value_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(value_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_value_checker passes all the categorical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert (
value_checker_failed_checks == {}
), f"Categorical checker found failed tests - {list(value_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_value_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
df.loc[5, "b"] = "u"
value_checker_failed_checks = x._transform_value_checker(df)
assert value_checker_failed_checks["b"]["values"] == [
"u"
], f"incorrect values saved to value_checker_failed_checks - expected: ['u'] but got: {value_checker_failed_checks['b']['values']}"
assert value_checker_failed_checks["b"]["idxs"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {value_checker_failed_checks['b']['idxs']}"
class TestTransformDatetimeChecker(object):
"""Tests for InputChecker._transform_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_datetime_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_datetime_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["datetime_values"],), "kwargs": {}}}
x = InputChecker(datetime_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_datetime_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_datetime_checker returns results dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert isinstance(
datetime_checker_failed_checks, dict
), f"incorrect datetime results type identified - expected: dict but got: {type(datetime_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_datetime_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert (
datetime_checker_failed_checks == {}
), f"Datetime checker found failed tests - {list(datetime_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_datetime_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
outliers_1 = pd.to_datetime("15/09/2017", utc=False)
outliers_2 = pd.to_datetime("13/09/2017", utc=False)
df.loc[0, "d"] = outliers_1
df.loc[1, "d"] = outliers_2
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
results = datetime_checker_failed_checks["d"]["minimum"]
assert results[0] == outliers_1, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_1} but got: {results[0]} "
)
assert results[1] == outliers_2, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_2} but got: {results[1]} "
)
def test_transform_captures_failed_test_both_minimum_and_maximum(self):
"""Test _transform_datetime_checker captures a failed check when the check includes a maximum value and a
minimum value"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
lower_outliers = | pd.to_datetime("15/09/2017", utc=False) | pandas.to_datetime |
from scipy import stats
import pandas as pd
def check_dmso(file_name):
# read in data file
dfs, sheet_names, expr_no = read_excel(file_name)
hypothesis = [False, False, False, False]
print('Part 1: Shapiro-Wilk test for normality:')
for i, sheet_name in enumerate(sheet_names):
print('Plates:', sheet_name)
df = dfs[i]
if len(df) > 10:
is_reject = check_normality(df, sheet_name)
hypothesis[expr_no[i] - 1] += is_reject
else:
print('assume non-normality due to small group size')
hypothesis[expr_no[i] - 1] = 1
print()
print('Reject normality hypothesis:',
'experiment 1:', bool(hypothesis[0]), ',',
'experiment 2:', bool(hypothesis[1]), ',',
'experiment 3:', bool(hypothesis[2]), ',',
'experiment 4:', bool(hypothesis[3]), '\n')
print('Part 2: Tests for equal variance and mean/median')
for i, sheet_name in enumerate(sheet_names):
print('Plates:', sheet_name)
df = dfs[i]
is_reject = hypothesis[expr_no[i] - 1]
if is_reject: # reject normality, follow up with non-parametric tests
test_non_parametric(df, sheet_name)
else: # NOT reject normality, follow up with parametric tests
test_parametric(df, sheet_name)
print()
pass
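# The helpers used above (check_normality, test_parametric, test_non_parametric) are defined
# elsewhere in this script and are not shown in this excerpt. A minimal sketch of what
# check_normality could look like is given below; the 'group' and 'value' column names are
# assumptions made purely for illustration and may differ from the real data layout.
def _check_normality_sketch(df, sheet_name, alpha=0.05):
    """Run a Shapiro-Wilk test per group; return True if normality is rejected for any group."""
    rejected = False
    for group_name, group in df.groupby('group'):
        stat, p_value = stats.shapiro(group['value'])
        print(sheet_name, group_name, 'W = %.3f, p = %.4f' % (stat, p_value))
        if p_value < alpha:
            rejected = True
    return rejected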
def read_excel(file_name):
    xls = pd.ExcelFile(file_name)
'''
Created on 19 May 2020
@author: spasz
@brief: Trend indicator. Rising/Falling, based on the data given as an argument.
'''
from scipy import signal
import numpy
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from core.indicator import indicator
class trend(indicator):
def __init__(self, data, ttype='rising'):
indicator.__init__(self, 'Trend', 'trend', data.index)
self.type = ttype
self.trends = self.Init(data)
def Init(self, data):
'''Init trend based on given data'''
if (self.type == 'rising'):
return self.FindUptrends(data)
return self.FindDowntrends(data)
@staticmethod
def FindMaxPeaks(data, n=7):
'''Return series of max points from given data'''
maxs = data.iloc[signal.argrelextrema(
data.values, numpy.greater_equal, order=n)[0]]
return maxs
@staticmethod
def FindMinPeaks(data, n=7):
'''Return series of min points from given data'''
mins = data.iloc[signal.argrelextrema(
data.values, numpy.less_equal, order=n)[0]]
return mins
@staticmethod
def GetTrendDaysLength(trend):
''' Returns trend days length '''
delta = trend.index[-1]-trend.index[0]
return delta.days
def FindUptrends(self, data, days=6, n=2):
        ''' Uptrend calculation is based on mins '''
uptrends = []
trend = pd.Series()
mins = self.FindMinPeaks(data, n)
# Find rising series. Start from end
for i in range(len(mins.values) - 1):
# If rising
if (mins[i] <= mins[i + 1]):
trend = trend.append(
pd.Series(mins.values[i], index=[mins.index[i]]))
trend = trend.append(
pd.Series(mins.values[i + 1], index=[mins.index[i + 1]]))
elif (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
uptrends.append(trend)
trend = pd.Series()
# Add last trend
if (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
uptrends.append(trend)
# Calculate regression line most fitting.
# If some point is far away from line then drop it.
# Add to data.
return uptrends
def FindDowntrends(self, data, days=6, n=2):
''' Downtrend calculation is based on maxs '''
downtrends = []
        trend = pd.Series()
"""Tests for KustoClient."""
import os
import json
import unittest
from datetime import datetime, timedelta
import pytest
from six import text_type
from mock import patch
from dateutil.tz.tz import tzutc
from azure.kusto.data.request import KustoClient, ClientRequestProperties
from azure.kusto.data.exceptions import KustoServiceError
from azure.kusto.data._response import WellKnownDataSet
from azure.kusto.data.helpers import dataframe_from_result_table
pandas_installed = False
try:
import pandas
pandas_installed = True
except:
pass
def mocked_requests_post(*args, **kwargs):
"""Mock to replace requests.post"""
class MockResponse:
"""Mock class for KustoResponse."""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.text = text_type(json_data)
self.status_code = status_code
self.headers = None
def json(self):
"""Get json data from response."""
return self.json_data
if args[0] == "https://somecluster.kusto.windows.net/v2/rest/query":
if "truncationmaxrecords" in kwargs["json"]["csl"]:
if json.loads(kwargs["json"]["properties"])["Options"]["deferpartialqueryfailures"]:
file_name = "query_partial_results_defer_is_true.json"
else:
file_name = "query_partial_results_defer_is_false.json"
elif "Deft" in kwargs["json"]["csl"]:
file_name = "deft.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
elif args[0] == "https://somecluster.kusto.windows.net/v1/rest/mgmt":
if kwargs["json"]["csl"] == ".show version":
file_name = "versionshowcommandresult.json"
else:
file_name = "adminthenquery.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
return MockResponse(None, 404)
DIGIT_WORDS = [
text_type("Zero"),
text_type("One"),
text_type("Two"),
text_type("Three"),
text_type("Four"),
text_type("Five"),
text_type("Six"),
text_type("Seven"),
text_type("Eight"),
text_type("Nine"),
text_type("ten"),
]
class KustoClientTests(unittest.TestCase):
"""Tests class for KustoClient."""
@patch("requests.post", side_effect=mocked_requests_post)
def test_sanity_query(self, mock_post):
"""Test query V2."""
client = KustoClient("https://somecluster.kusto.windows.net")
response = client.execute_query("PythonTest", "Deft")
expected = {
"rownumber": None,
"rowguid": text_type(""),
"xdouble": None,
"xfloat": None,
"xbool": None,
"xint16": None,
"xint32": None,
"xint64": None,
"xuint8": None,
"xuint16": None,
"xuint32": None,
"xuint64": None,
"xdate": None,
"xsmalltext": text_type(""),
"xtext": text_type(""),
"xnumberAsText": text_type(""),
"xtime": None,
"xtextWithNulls": text_type(""),
"xdynamicWithNulls": text_type(""),
}
for row in response.primary_results[0]:
self.assertEqual(row["rownumber"], expected["rownumber"])
self.assertEqual(row["rowguid"], expected["rowguid"])
self.assertEqual(row["xdouble"], expected["xdouble"])
self.assertEqual(row["xfloat"], expected["xfloat"])
self.assertEqual(row["xbool"], expected["xbool"])
self.assertEqual(row["xint16"], expected["xint16"])
self.assertEqual(row["xint32"], expected["xint32"])
self.assertEqual(row["xint64"], expected["xint64"])
self.assertEqual(row["xuint8"], expected["xuint8"])
self.assertEqual(row["xuint16"], expected["xuint16"])
self.assertEqual(row["xuint32"], expected["xuint32"])
self.assertEqual(row["xuint64"], expected["xuint64"])
self.assertEqual(row["xdate"], expected["xdate"])
self.assertEqual(row["xsmalltext"], expected["xsmalltext"])
self.assertEqual(row["xtext"], expected["xtext"])
self.assertEqual(row["xnumberAsText"], expected["xnumberAsText"])
self.assertEqual(row["xtime"], expected["xtime"])
self.assertEqual(row["xtextWithNulls"], expected["xtextWithNulls"])
self.assertEqual(row["xdynamicWithNulls"], expected["xdynamicWithNulls"])
self.assertEqual(type(row["rownumber"]), type(expected["rownumber"]))
self.assertEqual(type(row["rowguid"]), type(expected["rowguid"]))
self.assertEqual(type(row["xdouble"]), type(expected["xdouble"]))
self.assertEqual(type(row["xfloat"]), type(expected["xfloat"]))
self.assertEqual(type(row["xbool"]), type(expected["xbool"]))
self.assertEqual(type(row["xint16"]), type(expected["xint16"]))
self.assertEqual(type(row["xint32"]), type(expected["xint32"]))
self.assertEqual(type(row["xint64"]), type(expected["xint64"]))
self.assertEqual(type(row["xuint8"]), type(expected["xuint8"]))
self.assertEqual(type(row["xuint16"]), type(expected["xuint16"]))
self.assertEqual(type(row["xuint32"]), type(expected["xuint32"]))
self.assertEqual(type(row["xuint64"]), type(expected["xuint64"]))
self.assertEqual(type(row["xdate"]), type(expected["xdate"]))
self.assertEqual(type(row["xsmalltext"]), type(expected["xsmalltext"]))
self.assertEqual(type(row["xtext"]), type(expected["xtext"]))
self.assertEqual(type(row["xnumberAsText"]), type(expected["xnumberAsText"]))
self.assertEqual(type(row["xtime"]), type(expected["xtime"]))
self.assertEqual(type(row["xtextWithNulls"]), type(expected["xtextWithNulls"]))
self.assertEqual(type(row["xdynamicWithNulls"]), type(expected["xdynamicWithNulls"]))
expected["rownumber"] = 0 if expected["rownumber"] is None else expected["rownumber"] + 1
expected["rowguid"] = text_type("0000000{0}-0000-0000-0001-020304050607".format(expected["rownumber"]))
expected["xdouble"] = round(float(0) if expected["xdouble"] is None else expected["xdouble"] + 1.0001, 4)
expected["xfloat"] = round(float(0) if expected["xfloat"] is None else expected["xfloat"] + 1.01, 2)
expected["xbool"] = False if expected["xbool"] is None else not expected["xbool"]
expected["xint16"] = 0 if expected["xint16"] is None else expected["xint16"] + 1
expected["xint32"] = 0 if expected["xint32"] is None else expected["xint32"] + 1
expected["xint64"] = 0 if expected["xint64"] is None else expected["xint64"] + 1
expected["xuint8"] = 0 if expected["xuint8"] is None else expected["xuint8"] + 1
expected["xuint16"] = 0 if expected["xuint16"] is None else expected["xuint16"] + 1
expected["xuint32"] = 0 if expected["xuint32"] is None else expected["xuint32"] + 1
expected["xuint64"] = 0 if expected["xuint64"] is None else expected["xuint64"] + 1
expected["xdate"] = expected["xdate"] or datetime(2013, 1, 1, 1, 1, 1, 0, tzinfo=tzutc())
expected["xdate"] = expected["xdate"].replace(year=expected["xdate"].year + 1)
expected["xsmalltext"] = DIGIT_WORDS[int(expected["xint16"])]
expected["xtext"] = DIGIT_WORDS[int(expected["xint16"])]
expected["xnumberAsText"] = text_type(expected["xint16"])
microseconds = 1001 if expected["rownumber"] == 5 else 1000
expected["xtime"] = (
timedelta()
if expected["xtime"] is None
else (abs(expected["xtime"]) + timedelta(days=1, seconds=1, microseconds=microseconds))
* (-1) ** (expected["rownumber"] + 1)
)
if expected["xint16"] > 0:
expected["xdynamicWithNulls"] = text_type('{{"rowId":{0},"arr":[0,{0}]}}'.format(expected["xint16"]))
@patch("requests.post", side_effect=mocked_requests_post)
def test_sanity_control_command(self, mock_post):
"""Tests contol command."""
client = KustoClient("https://somecluster.kusto.windows.net")
response = client.execute_mgmt("NetDefaultDB", ".show version")
self.assertEqual(len(response), 1)
primary_table = response.primary_results[0]
row_count = 0
for _ in primary_table:
row_count += 1
self.assertEqual(row_count, 1)
result = primary_table[0]
self.assertEqual(result["BuildVersion"], "1.0.6693.14577")
self.assertEqual(
result["BuildTime"], datetime(year=2018, month=4, day=29, hour=8, minute=5, second=54, tzinfo=tzutc())
)
self.assertEqual(result["ServiceType"], "Engine")
self.assertEqual(result["ProductVersion"], "KustoMain_2018.04.29.5")
@pytest.mark.skipif(not pandas_installed, reason="requires pandas")
@patch("requests.post", side_effect=mocked_requests_post)
def test_sanity_data_frame(self, mock_post):
"""Tests KustoResponse to pandas.DataFrame."""
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal
client = KustoClient("https://somecluster.kusto.windows.net")
data_frame = dataframe_from_result_table(
client.execute_query("PythonTest", "Deft").primary_results[0], raise_errors=False
)
self.assertEqual(len(data_frame.columns), 19)
expected_dict = {
"rownumber": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"rowguid": Series(
[
"",
"00000000-0000-0000-0001-020304050607",
"00000001-0000-0000-0001-020304050607",
"00000002-0000-0000-0001-020304050607",
"00000003-0000-0000-0001-020304050607",
"00000004-0000-0000-0001-020304050607",
"00000005-0000-0000-0001-020304050607",
"00000006-0000-0000-0001-020304050607",
"00000007-0000-0000-0001-020304050607",
"00000008-0000-0000-0001-020304050607",
"00000009-0000-0000-0001-020304050607",
],
dtype=object,
),
"xdouble": Series([None, 0.0, 1.0001, 2.0002, 3.0003, 4.0004, 5.0005, 6.0006, 7.0007, 8.0008, 9.0009]),
"xfloat": Series([None, 0.0, 1.01, 2.02, 3.03, 4.04, 5.05, 6.06, 7.07, 8.08, 9.09]),
"xbool": Series([None, False, True, False, True, False, True, False, True, False, True], dtype=bool),
"xint16": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xint32": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xint64": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xuint8": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xuint16": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xuint32": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xuint64": Series([None, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
"xdate": Series(
[
"NaT",
"2014-01-01T01:01:01.000000000",
"2015-01-01T01:01:01.000000000",
"2016-01-01T01:01:01.000000000",
"2017-01-01T01:01:01.000000000",
"2018-01-01T01:01:01.000000000",
"2019-01-01T01:01:01.000000000",
"2020-01-01T01:01:01.000000000",
"2021-01-01T01:01:01.000000000",
"2022-01-01T01:01:01.000000000",
"2023-01-01T01:01:01.000000000",
],
dtype="datetime64[ns]",
),
"xsmalltext": Series(
["", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine"], dtype=object
),
"xtext": Series(
["", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine"], dtype=object
),
"xnumberAsText": | Series(["", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], dtype=object) | pandas.Series |
import tensorflow as tf
import numpy as np
import pandas as pd
import math
import umap
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import networkx as nx
from VITAE import VITAE, get_igraph, louvain_igraph, plot_clusters, load_data, get_embedding
type_dict = {
# dyno
'dentate':'UMI',
'immune':'UMI',
'neonatal':'UMI',
'planaria_muscle':'UMI',
'planaria_full':'UMI',
'aging':'non-UMI',
'cell_cycle':'non-UMI',
'fibroblast':'non-UMI',
'germline':'non-UMI',
'human':'non-UMI',
'mesoderm':'non-UMI',
# dyngen
'bifurcating_2':'non-UMI',
"cycle_1":'non-UMI',
"cycle_2":'non-UMI',
"cycle_3":'non-UMI',
"linear_1":'non-UMI',
"linear_2":'non-UMI',
"linear_3":'non-UMI',
"trifurcating_1":'non-UMI',
"trifurcating_2":'non-UMI',
"bifurcating_1":'non-UMI',
"bifurcating_3":'non-UMI',
"converging_1":'non-UMI',
# our model
'linear':'UMI',
'bifurcation':'UMI',
'multifurcating':'UMI',
'tree':'UMI',
}
source_dict = {
'dentate':'dyno',
'immune':'dyno',
'neonatal':'dyno',
'planaria_muscle':'dyno',
'planaria_full':'dyno',
'aging':'dyno',
'cell_cycle':'dyno',
'fibroblast':'dyno',
'germline':'dyno',
'human':'dyno',
'mesoderm':'dyno',
'bifurcating_2':'dyngen',
"cycle_1":'dyngen',
"cycle_2":'dyngen',
"cycle_3":'dyngen',
"linear_1":'dyngen',
"linear_2":'dyngen',
"linear_3":'dyngen',
"trifurcating_1":'dyngen',
"trifurcating_2":'dyngen',
"bifurcating_1":'dyngen',
"bifurcating_3":'dyngen',
"converging_1":'dyngen',
'linear':'our model',
'bifurcation':'our model',
'multifurcating':'our model',
'tree':'our model',
}
df = pd.DataFrame()
import logging
import re
import pandas as pd
from unidecode import unidecode
from comvest.utilities.io import files, read_from_db, write_result, read_result
from comvest.utilities.logging import progresslog, resultlog
pd.options.mode.chained_assignment = None # default='warn'
def validacao_curso(df, col, date):
cursos = df_cursos.loc[df_cursos['ano_vest'] == date]['cod_curso'].tolist()
    # Codes that are not in the course list are remapped to missing
df[col].fillna(-1, inplace=True)
df[col] = df[col].map(lambda cod: int(cod) if int(cod) in cursos else '')
df[col] = pd.to_numeric(df[col], errors='coerce').astype('Int64')
return df
# Function to concatenate day, month and year
def data_nasc(row, df):
if ('DATA_NASC' in df.columns) or ('DAT_NASC' in df.columns) or ('DTNASC' in df.columns):
if 'DATA_NASC' in df.columns:
data = row['DATA_NASC']
elif 'DAT_NASC' in df.columns:
data = row['DAT_NASC']
else:
data = row['DTNASC']
data = str(data).split('.')[0]
if data == 'nan': return ('')
if len(data) <= 6:
data = data[:-2] + '19' + data[-2:]
ano = data[-4:]
mes = data[-6:-4]
dia = data.replace(data[-6:], '')
if len(data) < 8:
dia = '0' + dia
res = dia + mes + ano
elif all(x in df.columns for x in ('DIA','MES','ANO')):
dia = str(row['DIA']).zfill(2)
mes = str(row['MES']).zfill(2)
ano = str(row['ANO'])
if len(ano) < 4:
ano = '19' + ano
res = "{0}{1}{2}".format(dia, mes, ano)
else:
        # Document has no column(s) with the birth date
res = ''
return res
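# Illustration only (not part of the original script): when the birth date arrives in separate
# DIA/MES/ANO columns, data_nasc zero-pads day and month and prefixes a two-digit year with '19'.
def _data_nasc_example():
    example = pd.DataFrame({'DIA': [7], 'MES': [3], 'ANO': [85]})
    return data_nasc(example.iloc[0], example)  # -> '07031985'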
def tratar_inscricao(df):
    # Check the application number (Número de Inscrição) across the different column-name variants and strip the '\.0' from the string
if 'INSC' in df.columns:
df['INSC'] = df['INSC'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_CAND' in df.columns:
df['INSC'] = df['INSC_CAND'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_cand' in df.columns:
df['INSC'] = df['INSC_cand'].astype("string").replace('\.0', '', regex=True)
elif 'INSCRICAO' in df.columns:
df['INSC'] = df['INSCRICAO'].astype("string").replace('\.0', '', regex=True)
df['INSC'] = pd.to_numeric(df['INSC'], errors='coerce', downcast='integer').astype('Int64')
return df
def tratar_CPF(df):
    # Check whether the CPF column exists
if 'CPF' in df.columns:
df['CPF'] = df['CPF'].map(lambda cpf: str(cpf).zfill(11))
else:
df.insert(loc=1, column='CPF', value='-')
return df
def tratar_doc(df):
if any(col in df.columns for col in {'RG','DOC3'}):
df.rename({'RG':'DOC','DOC3':'DOC'}, axis=1, inplace=True)
df['DOC'] = df['DOC'].str.replace(' ','')
return df
def tratar_nome(df):
    # If the name is given as NOME_CAND or NOMEOFIC, rename the column to NOME
if 'NOME_CAND' in df.columns:
df.rename({'NOME_CAND': 'NOME'}, axis=1, inplace=True)
elif 'NOMEOFIC' in df.columns:
df.rename({'NOMEOFIC': 'NOME'}, axis=1, inplace=True)
elif 'NOME_cand' in df.columns:
df.rename({'NOME_cand': 'NOME'}, axis=1, inplace=True)
return df
def tratar_nome_pai(df):
if 'PAI' in df.columns:
df.rename({'PAI': 'NOME_PAI'}, axis=1, inplace=True)
return df
def tratar_nome_mae(df):
if 'MAE' in df.columns:
df.rename({'MAE': 'NOME_MAE'}, axis=1, inplace=True)
return df
def tratar_nacionalidade(df):
for col in df.columns:
if col in {'NACIO','NACION','NACIONALID','NACIONALIDADE'}:
df.rename({col: 'NACIONALIDADE'}, axis=1, inplace=True)
df['NACIONALIDADE'] = pd.to_numeric(df['NACIONALIDADE'], errors='coerce', downcast='integer').astype('Int64')
df['NACIONALIDADE'].replace(0, pd.NA, inplace=True)
return df
return df
def tratar_mun_nasc(df):
for col in df.columns:
if col in {'MUNICIPIO_NASC','MU_NASC','MUNIC_NASC','CIDNASC','CIDNAS'}:
df.rename({col: 'MUN_NASC'}, axis=1, inplace=True)
df['MUN_NASC'] = df['MUN_NASC'].map(lambda mun: unidecode(str(mun)).upper() if str(mun) != '-' else '')
return df
return df
def tratar_uf_nasc(df):
for col in df.columns:
if col in {'UFNASC','EST_NASC','UFNAS'}:
df.rename({col: 'UF_NASC'}, axis=1, inplace=True)
df['UF_NASC'] = df['UF_NASC'].map(lambda uf: unidecode(str(uf)).upper() if str(uf) != '-' else '')
return df
return df
def tratar_cep(df):
for col in df.columns:
if col in {'CEP','CEPEND','CEP_END','CEP3'}:
df.rename({col: 'CEP_RESID'}, axis=1, inplace=True)
fill = df['CEP_RESID'].map(lambda cep: len(re.sub('\D','',str(cep)))).max()
fill = 8 if fill > 8 else fill
df['CEP_RESID'] = df['CEP_RESID'].map(lambda cep: re.sub('\D','',str(cep)).zfill(fill))
return df
if 'CEP_RESID' not in df.columns:
df['CEP_RESID'] = ''
return df
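# Illustration only: CEP values are stripped of non-digit characters and left-padded with zeros
# up to the longest CEP seen in the column (capped at the 8 digits of a Brazilian CEP).
def _tratar_cep_example():
    example = pd.DataFrame({'CEP': ['13083-970', '13100']})
    return tratar_cep(example)  # CEP_RESID -> ['13083970', '00013100']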
def tratar_mun_resid(df):
for col in df.columns:
if col in {'MUEND','MUNIC_END','MUNICIPIO','CID','CIDEND'}:
df.rename({col: 'MUN_RESID'}, axis=1, inplace=True)
df['MUN_RESID'] = df['MUN_RESID'].map(lambda mun: unidecode(str(mun)).upper())
return df
return df
def tratar_uf_resid(df):
    # If the state (UF) of residence is given as UFEND, UF_END or ESTADO, rename the column to UF_RESID
if 'UFEND' in df.columns:
df.rename({'UFEND': 'UF_RESID'}, axis=1, inplace=True)
elif 'UF_END' in df.columns:
df.rename({'UF_END': 'UF_RESID'}, axis=1, inplace=True)
elif 'ESTADO' in df.columns:
df.rename({'ESTADO': 'UF_RESID'}, axis=1, inplace=True)
elif 'EST' in df.columns:
df.rename({'EST': 'UF_RESID'}, axis=1, inplace=True)
return df
def tratar_opvest(df,date,path):
    # Check the course-choice columns for the entrance exam (vestibular)
for col in df.columns:
if any(opc in col for opc in {'OPCAO1','OP1','OPCAO1OR'}):
df.rename({col: 'OPCAO1'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO1', date)
if any(opc in col for opc in {'OPCAO2','OP2','OPCAO2OR'}):
df.rename({col: 'OPCAO2'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO2', date)
if any(opc in col for opc in {'OPCAO3','OP3'}):
df.rename({col: 'OPCAO3'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO3', date)
    # Option 1 = 22 (Music) - must be remapped to the code of the chosen emphasis, taken from the profile sheet
if (date == 2001) or (date == 2002) or (date == 2003):
        emphasis = pd.read_excel(path, sheet_name='perfil', usecols=['insc_cand','opcao1'], dtype=str)
# Copyright (C) 2021 ServiceNow, Inc.
import pytest
import pandas as pd
import re
from nrcan_p2.data_processing.preprocessing_dfcol import (
rm_dbl_space,
rm_cid,
rm_dbl_punct,
convert_to_ascii,
lower,
rm_punct,
rm_newline,
rm_triple_chars,
rm_mid_num_punct,
rm_word_all_punct,
rm_newline_hyphenation,
rm_mid_word_punct,
rm_beg_end_word_punct,
merge_words,
merge_words_bkwd,
rm_nonprintable,
rm_punct_mid_punct,
rm_non_textual_punct,
rm_newline_except_end,
strip_space,
rm_email,
rm_url,
rm_doi,
rm_phonenumber,
rm_slash
)
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', ' blah \t\t \t \n blah'],
['Alaska. \n', ' blah \n blah']
)
]
)
def test_rm_dbl_space(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_dbl_space(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '(cid:1010)blah(cid:4)\n'],
['Alaska. \n', 'blah\n']
)
]
)
def test_rm_cid(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_cid(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
([' Alaska. \n', '\nblah \n '],
['Alaska. \n', '\nblah \n']
)
]
)
def test_strip_space(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = strip_space(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '||||kkll-ll???!!??...??....'],
['Alaska. \n', '|kkll-ll?!?...?.']
)
]
)
def test_rm_dbl_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
print(df_test)
res = rm_dbl_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '𝟏−𝑨𝑨𝑹.. \n', "1 %>+* .B 4!\".𝐵 "],
['Alaska. \n', '1-AAR.. \n', "1 %>+* .B 4!\".B "]
)
]
)
def test_convert_to_ascii(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = convert_to_ascii(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', 'AL_aska.. \n'],
['alaska. \n', 'al_aska.. \n']
)
]
)
def test_lower(text_col, expected_text):
    df_test = pd.DataFrame({'text': text_col})
# AmpliconExtractor.py
#
# Created by <NAME> for the Meningitis lab in the CDC, under contract with IHRC. Inc.
# <NAME> <<EMAIL>>
#
#
#
#
#pylint: disable=bad-indentation,global-statement, broad-except
import os
import pandas as pd
import re
import sys
import genomeOrganizer
import utilities
import tempfile
# from subprocess import call, DEVNULL
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from BLASThelpers import makeblastdb, loadBLASTtableToDataFrame,blankBLASTtable
from shutil import copytree
# _primer_file = 'settings/sample-primers.csv'
default_verbose = __name__ == "__main__"
current_verbose = default_verbose
script_version=1
script_subversion = 5
def vprint(text):
if current_verbose:
print(text)
def print(text):#pylint: disable=redefined-builtin
export_text = "{}".format(text)
if __name__ != "__main__":
export_text = "\t"+export_text
sys.stdout.write(export_text+"\n")
def read_file_to_dict(filename):
primer_cols = ['Locus','Role','Region','Name','Direction','Sequence']
primer_frame = pd.read_table(filename,comment='#',dtype=str,skip_blank_lines=True) #skip blank lines is true by default
    ### skip_blank_lines alone is not enough here, so drop rows that are entirely NaN
primer_frame = primer_frame.dropna(how='all')
primer_frame['Direction'].fillna('',inplace=True)
primer_frame['Region'].fillna('All',inplace=True)
primer_dict = dict()
for _, row in primer_frame.iterrows():
##Parse
locus = row['Locus']
region = row['Region']
primer = row['Name']
sequence = row['Sequence']
direction = row['Direction']
##Role can be a list
function = []
role = row['Role']
if re.match('PCR',role,re.IGNORECASE):
function = ['PCR']
elif re.match('Seq',role,re.IGNORECASE):
function = ['Seq']
elif re.match('All',role,re.IGNORECASE):
function = ['PCR','Seq']
else: ##Did not find function, use for both steps
function = ['PCR','Seq']
vprint("Did not identify function of primer {} -- use for both PCR and Seq".format("-".join([locus,primer])))
vprint('To assign a function, write "PCR", "Seq", or "All" in the second column')
##Construct dict of dict
if locus not in primer_dict:
primer_dict[locus] = dict()
locus_dict = primer_dict[locus]
for f in function: #Add data to both PCR and Seq if appropriate
if f not in locus_dict:
locus_dict[f] = dict()
function_dict = locus_dict[f]
if region not in function_dict:
function_dict[region] = dict()
region_dict = function_dict[region]
region_dict[primer] = sequence
##TODO add direction
#
#
# _primer_dict = dict() ## Dict of Dicts: locus, function (PCR,Seq),region,name,sequence
# with open(filename) as primers:
# p_reader = csv.reader(primers)
# p_reader = [row for row in p_reader if not row[0].startswith('#')] #strip comments
# for row in p_reader:
# row = [item for item in row if item != ''] #strip empy strings
# if len(row) > 0:
# vprint("Parsing row with {} items".format(len(row)))
# ##First is locus name -- no exception
# locus = row[0]
# ##Last is sequence -- no exception
# sequence = row[-1].replace(" ","")
# ##Second to last is primer name -- no exception
# primer = row[-2]
# vprint("Name is {}-{}".format(locus,primer))
# ##Function and region are optional
# c_max = len(row) - 3 #last two indexes are used, so -3 is the maximum that is open
# c = 1
# ##Second could be function (PCR,Seq,All)
# if c <= c_max:
# function = []
# if re.match('PCR',row[c],re.IGNORECASE):
# function = ['PCR']
# c+=1
# elif re.match('Seq',row[c],re.IGNORECASE):
# function = ['Seq']
# c+=1
# elif re.match('All',row[c],re.IGNORECASE):
# function = ['PCR','Seq']
# c+=1
# else: ##Did not find function, use for both steps
# function = ['PCR','Seq']
# vprint("Did not identify function of primer {} -- use for both PCR and Seq".format("-".join([locus,primer])))
# vprint('To assign a function, write "PCR", "Seq", or "All" in the second column')
# region = 'All'
# if c <= c_max:
# region = row[c]
# if locus not in _primer_dict:
# _primer_dict[locus] = dict()
# locus_dict = _primer_dict[locus]
# for f in function: #Add data to both PCR and Seq if appropriate
# if f not in locus_dict:
# locus_dict[f] = dict()
# function_dict = locus_dict[f]
# if region not in function_dict:
# function_dict[region] = dict()
# region_dict = function_dict[region]
# region_dict[primer] = sequence
# #~ print("Found primers for the following genes: "+";".join(_primer_dict.keys())) #Not accurate. Does not confirm that dict contains primers
return primer_dict
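# Illustration only (not part of the original module): the primer table is tab-delimited with
# the columns Locus, Role, Region, Name, Direction and Sequence, and read_file_to_dict() nests
# it as primer_dict[locus][role][region][name] = sequence. The locus, primer names and
# sequences below are invented purely to show the shape of the result.
def _read_file_to_dict_example(table_path='example_primers.tab'):
    example = pd.DataFrame({
        'Locus': ['ctrA', 'ctrA'],
        'Role': ['PCR', 'Seq'],
        'Region': ['All', 'All'],
        'Name': ['ctrA-F', 'ctrA-seqF'],
        'Direction': ['For', 'For'],
        'Sequence': ['GCTGCGGTAGGTGGTTCAA', 'ACCGGTATCAACAGCAGTC'],
    })
    example.to_csv(table_path, sep='\t', index=False)
    return read_file_to_dict(table_path)
    # -> {'ctrA': {'PCR': {'All': {'ctrA-F': 'GCTGCG...'}},
    #              'Seq': {'All': {'ctrA-seqF': 'ACCGGT...'}}}}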
## map_primers_to_genome will use BLAST to identify where primers are likely to bind in the genome.
# _primer_dict is a primer heirarchy as returned by "read_file_to_dict"
# blast_db is the name of the genome db you want to search
# outfile is destination to write the blast results to. If outfile is given, we assume that you want details about where the primers map and will put minor warnings to stdout; otherwise, we assume you don't care and the are suppressed
# ## User can "cheat" by passing "range_from" and "range_to" integers in the locus dict.
# Returns : export_regions[locus][subregion][name] = {'contig','start','stop'} where start and stop are the first coordinates past the low and high primers
## Returns a dict with genome information (key in CAPS): isolate NAME, ORIGINAL filename, FASTA filename, blast DB name, contig SEQS
def setupGenomeForBlastBasedExtraction(genome_name,genome_file,tempDir,file_format = '',is_compressed = None):
##Genome information
genomeInfo = dict()
genomeInfo['name'] = genome_name
genomeInfo['original'] = genome_file #just for reporting
##Some people use weird genome filenames, so I need to copy it to something without special characters
temp_genome = os.path.join(tempDir,genome_name + '.fasta')
genomeOrganizer.exportGenomeFASTA(genome_file,temp_genome,file_format,is_compressed)
genomeInfo['fasta'] = temp_genome
if not os.path.isfile(genomeInfo['fasta']):
raise IOError("Illegitimate file at "+genomeInfo['fasta'])
#~ genomeDir,genomeFile = os.path.split(os.path.abspath(genomeInfo['fasta']))
#open the genome file for extracting sequences
genome_handle = utilities.flexible_handle(genomeInfo['original'], is_compressed, 'rt')
genomeInfo['seqs'] = SeqIO.to_dict(SeqIO.parse(genome_handle, file_format))
print("{} bp in {} contig(s)".format(sum([len(c) for c in genomeInfo['seqs'].values()]),len(genomeInfo['seqs']))) ##Appends to sequence identifier line
if len(genomeInfo['seqs']) == 0:
raise ValueError("No sequences parsed from file {}".format(genomeInfo['fasta']))
genome_handle.close()
# make search database for genome
db_base = os.path.basename(genomeInfo['fasta'])
genomeInfo['db'] = os.path.join(tempDir,db_base)
makeblastdb(genomeInfo['fasta'],genomeInfo['db'])
return genomeInfo
class AmpliconExtractor:
def __init__(self,primer_file,working_dir=None,generate_output=False):
### Make writable directories
if working_dir is None:
working_dir = os.getcwd()
##utilities.safeMakeOutputFolder(os.path.join(working_dir,'AmpExtTemp'))
self.generate_output = generate_output
self.primers_dict = read_file_to_dict(primer_file)
if generate_output:
self.outDir = utilities.safeMakeOutputFolder(os.path.join(working_dir,'AmpliconExtractor'))
self.sequence_files = {locus: os.path.join(self.outDir,'{}_primer-extracted_sequences.fasta'.format(locus)) for locus in self.primers_dict.keys()}
self.amplicon_info_file = os.path.join(self.outDir,'amplicon_information.tab')
self.tempDirObj = tempfile.TemporaryDirectory(suffix='_AmpExt', prefix='tmp', dir=self.outDir)
else:
self.outDir = self.sequence_files = self.amplicon_info_file = None
self.tempDirObj = tempfile.TemporaryDirectory(suffix='_AmpExt', prefix='tmp', dir=working_dir)
self.amplicon_info_list = []
##Full service function for a single genome
def evaluateGenome(self,genome_name,genome_file,file_format = '',is_compressed = None, keep_temp = False):
print("## Begin searching sequence {} ## ".format(genome_name))
primer_hit_file = None
if self.outDir is not None:
primer_hit_file = os.path.join(self.outDir,'primer_hits.tab')
primer_hit_file = utilities.appendToFilename(primer_hit_file, genome_name)
genomeInfo = setupGenomeForBlastBasedExtraction(genome_name,genome_file,self.tempDirObj.name,file_format,is_compressed)
amplicon_info = {'Filename':genome_file,"Lab_ID":genome_name}
primers_loc = self.map_primers_to_genome(genomeInfo['db'],primer_hit_file,keep_temp=keep_temp)
for locus, locus_dict in primers_loc.items():
for subregion, subregion_dict in locus_dict.items():
if isinstance(subregion_dict,dict): ##Sequencing features
for name,locations in subregion_dict.items():
print('Seq name :'+ name)
contig = locations['contig']
print('On contig: '+contig)
contig_seq = genomeInfo['seqs'][contig]
print('Found contig: {}, length {}'.format(contig_seq.id,len(contig_seq)))
start = locations['start']
print('Start: {}'.format(start))
stop = locations['stop']
print('Stop: {}'.format(stop))
my_seq = contig_seq[start:stop+1]
new_name = name.replace(' ','_')
my_seq.id = new_name
my_seq.description ="{}:{}-{}".format(contig,start,stop)
# my_fasta = SeqRecord(my_seq,id=name.replace(' ','_'),description="{}:{}-{}".format(contig,start,stop))
if self.sequence_files is not None:
with open(self.sequence_files[locus],"a") as fout:
SeqIO.write(my_seq,fout,'fasta')
if file_format == 'fastq':
fastq_file = utilities.setExt(self.sequence_files[locus], 'fastq', False)
with open(fastq_file,'a') as fastq_out:
SeqIO.write(my_seq,fastq_out,'fastq')
elif subregion == 'OuterRange': ##Original amplicon...actually a range
range_list = subregion_dict
for item in range_list:
assert isinstance(item,region_record)
if len(range_list) == 1:
rr = range_list[0]
amplicon_info['{}_PCR_size'.format(locus)] = "{}".format(rr.get_max() - rr.get_min() + 1)
amplicon_info['{}_contig'.format(locus)] = "{}".format(rr.contig)
amplicon_info['{}_start_position'.format(locus)] = "{}".format(rr.get_min())
amplicon_info['{}_stop_position'.format(locus)] = "{}".format(rr.get_max())
#TODO: report something
else:
print("Warning feature {} not reported for locus {}".format(subregion,locus))
self.amplicon_info_list.append(amplicon_info)
##Returns a dict with entry for every locus that was searched for
#Tolerance keeps hits with bit-scores at tolerance*max_score
def map_primers_to_genome(self,blast_db,outfile=None,search_set=None,default_to_PCR=False,temp_dir = None, keep_temp=False, tolerance=1):
workingDir = temp_dir if temp_dir is not None else self.tempDirObj.name
if outfile == '':
outfile = None
if search_set == None:
search_set = set(self.primers_dict.keys())
temp_infile = os.path.join(workingDir,'tmp_primer.fasta')
temp_outfile = os.path.join(workingDir,'tmp_primer_blast.fasta')
blast_combined = blankBLASTtable()
ql_head = 'query_length' #new column to add
fh_head = 'forward hit'
export_regions = dict() #name for region, coordinates of innermost nucleotide on outermost primers (draw data from seq_borders dict in the sequencing reaction)
for locus in search_set:
if locus not in self.primers_dict.keys():
print("Error: {} is not in the set of primer loci".format(locus))
locus_dict = self.primers_dict[locus].copy() #so that I can modify it
if default_to_PCR: #Make sure there are primers for sequencing the entire region
seq_dict = locus_dict['Seq']
if 'All' not in seq_dict.keys():
seq_dict['All'] = locus_dict['PCR']['All']
export_regions[locus] = dict()
##Evaluate PCR dict first to find general range in which sequencing primers can bind
PCR_dict = locus_dict['PCR']
range_list = []
## Create a master range limit if specified
has_range = ('range_contig' in locus_dict.keys()
and 'range_from' in locus_dict.keys()
and 'range_to' in locus_dict.keys())
if has_range:
master_range = region_record(locus_dict['range_contig'],locus_dict['range_from'],locus_dict['range_to'])
range_list.append(master_range)
## Place BLAST hits into ranges
for (subregion, subregion_dict) in PCR_dict.items(): ##Only one region: "all"
for (primer,sequence) in subregion_dict.items():
#Write query file
my_seq = SeqRecord(Seq(sequence,IUPAC.ambiguous_dna),id="-".join([locus,'PCR',subregion,primer]))
with open(temp_infile,"w") as fout:
SeqIO.write(my_seq,fout,'fasta')
#Search BLAST
blast_cline = NcbiblastnCommandline(query=temp_infile,db=blast_db,outfmt=6,out=temp_outfile,task='blastn-short',evalue=1,reward=1,penalty=-1,gapopen=3,gapextend=2)
blast_cline() ##Should only print for errors
blast_table = loadBLASTtableToDataFrame(temp_outfile)
if keep_temp:
named_file = '{}_{}.tab'.format("-".join([locus,'PCR',subregion,primer]),os.path.basename(blast_db))
utilities.safeOverwriteTable(os.path.join(workingDir,named_file), blast_table, 'tab')
##SPlace best hits into ranges
if len(blast_table) > 0:
##Add some extra info to table
blast_table[ql_head] = len(my_seq)
blast_table[fh_head] = blast_table['s. start'] < blast_table['s. end']
## Limit table to best hits
best = blast_table.sort_values(by=['bit score'],ascending=False).iloc[0]
best_table = blast_table[blast_table['bit score'] >= tolerance*best['bit score']] #This may be too stringent; may need to revisit
## Add best hits to ranges
for _,this_hit in best_table.iterrows():
finished = False #if we found a range for it
for this_range in range_list:
if not finished: #stop upon success or if range is exclusive
finished = this_range.try_add_primer(this_hit['subject id'],this_hit['s. start'],this_hit[fh_head],True)
if this_range.exclusive and not finished:
finished = True
if len(best_table) == 1:
print("Warning: an exclusive hit failed to map to the prespecified region. Please report to developer(s)")
if not finished:
new_range = region_record()
new_range.try_add_primer(this_hit['subject id'],this_hit['s. start'],this_hit[fh_head],True)
range_list.append(new_range)
## Record best hits for reporting
blast_combined = pd.concat([blast_combined,best_table],sort=True)##Note: this is compatible with pandas 0.23 +; older versions will fail. Without sort, it makes FutureWarning and exception.
else:
print("Warning: zero hits for {}".format(my_seq.id))
##Merge any ranges that are close/overlapping; test if ranges are valid (primer pairs)
i = 0
ValidRanges = set()
while i < len(range_list):
this_range = range_list[i]
j = len(range_list)-1
while j > i:
merger = this_range.try_merge_regions(range_list[j])
if merger:
print("Warning: this is an exceptional situation and has not been tested, please report to developer(s). Range merger")
del(range_list[j])
j-=1
#Test validity of this_range
if (len(this_range.For_list) > 0 and len(this_range.Rev_list) > 0):
if this_range.get_min() < this_range.get_max():
ValidRanges.add(i)
i+=1
            #Remove invalid ranges
range_list = [range_list[i] for i in ValidRanges]
#Report oddities
if len(range_list) == 0:
print("Warning: Unable to find an amplification region for {}".format(locus))
elif len(range_list) == 2:
print("Warning: Detected multiple amplification regions for {}".format(locus))
for this_range in range_list:
vprint('\n'+locus + ": Potential amplicon region")
vprint(this_range)
## Find the sequencing sites within the defined ranges
Seq_dict = locus_dict['Seq']
for (subregion, subregion_dict) in Seq_dict.items():
export_regions[locus][subregion] = dict()
seq_borders = dict() ##Use range as key to track where sequencing of subregion starts. Values outside of range indicate no matches
seq_primers = dict() ##primer names corresponding to border positions
for (primer,sequence) in subregion_dict.items():
my_seq = SeqRecord(Seq(sequence,IUPAC.ambiguous_dna),id="-".join([locus,'Seq',subregion,primer]))
with open(temp_infile,"w") as fout:
SeqIO.write(my_seq,fout,'fasta')
blast_cline = NcbiblastnCommandline(query=temp_infile,db=blast_db,outfmt=6,out=temp_outfile,task='blastn-short',evalue=1,reward=1,penalty=-1,gapopen=3,gapextend=2)
blast_cline() ##Should only print for errors
blast_table = loadBLASTtableToDataFrame(temp_outfile)
if len(blast_table) > 0:
##Add some extra info to table
blast_table[ql_head] = len(my_seq)
blast_table[fh_head] = blast_table['s. start'] < blast_table['s. end']
for my_range in range_list:
## Limit table to hits in range
r_min = my_range.get_min()
r_max = my_range.get_max()
if my_range not in seq_borders: ##TODO: this should probably be initialized immediately after declaration. Need to check that it doesnt' break the downstream features
seq_borders[my_range] = [r_min -1, r_max+1]
seq_primers[my_range] = ['None','None']
range_table = blast_table[blast_table['subject id'] == my_range.contig]
range_table = range_table[range_table['s. end'] >= r_min]
range_table = range_table[range_table['s. end'] <= r_max]
if len(range_table) > 0:
## Limit table to best hits
best_in_range = range_table.sort_values(by=['bit score'],ascending=False).iloc[0]
range_table = range_table[range_table['bit score'] >= best_in_range['bit score']] #This may be too stringent; may need to revisit
if len(range_table) > 0:
if len(range_table) > 1:
export_line = "Warning: sequencing primer maps to multiple locations within PCR primers. Using outermost site: {}".format(my_seq.id)
# if __name__ != "__main__": ##Being called from an outside procedure...indent to indicated subsidiary position
# export_line = '\t'+export_line
print(export_line)
for _, hit in range_table.iterrows():
q_end = hit['q. end']
gap = len(my_seq) - q_end
s_end = hit['s. end']
is_for = hit[fh_head]
if is_for:
if seq_borders[my_range][0] < my_range.get_min():
seq_borders[my_range][0] = s_end
seq_primers[my_range][0] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the low end: {}".format(my_seq.id))
else:
if seq_borders[my_range][0] > s_end:
seq_borders[my_range][0] = s_end
seq_primers[my_range][0] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the low end: {}".format(my_seq.id))
vprint("Warning: multiple sequencing primers map in forward direction on template. Using outermost site: {}".format("-".join([locus,'Seq',subregion,seq_primers[my_range][0]])))
else:
if seq_borders[my_range][1] > my_range.get_max():
seq_borders[my_range][1] = s_end
seq_primers[my_range][1] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the high end: {}".format(my_seq.id))
else:
if seq_borders[my_range][1] < s_end:
seq_borders[my_range][1] = s_end
seq_primers[my_range][1] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the high end: {}".format(my_seq.id))
vprint("Warning: multiple sequencing primers map in reverse direction on template. Using outermost site: {}".format("-".join([locus,'Seq',subregion,seq_primers[my_range][1]])))
else:
print("Warning: sequencing primer failed to map within PCR primers: {}".format(my_seq.id))
## Record best hits for reporting
best_table = blast_table[blast_table['bit score'] >= best_in_range['bit score']] #This may be too stringent; may need to revisit
#~ print("Identified {} hits above threshold used for best in range".format(len(best_table)))
                        blast_combined = pd.concat([blast_combined,best_table],sort=True)
#!/home/renato/anaconda2/bin/python
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.interpolate import interp2d
from pylab import *
import pandas as pd
for k in range(0,101):
#beta = np.ones(60)
beta = np.empty(94)
beta.fill(k/100.0)
data = np.array([beta]).T
df4 = pd.DataFrame(data)
df4.to_excel("/home/renato/groimp_efficient/beta_1.xls", index=False, header=False)
os.system("java -Xmx2000m -jar /home/renato/Downloads/GroIMP-1.5/core.jar --headless /home/renato/Downloads/FSPM_BASIC-master-transpired-efficient/project.gs")
    df0 = pd.read_csv('/home/renato/groimp_efficient/field.txt',
                      delim_whitespace=True, skiprows=1, header=None,
                      names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex", "leafArea", "fieldRFR"])
import numpy as np
import pandas as pd
import wandb
def filter_col(col, df):
return list(filter(lambda x: col in x, df.columns))
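# A small usage sketch for filter_col (illustrative only): pick out every column of a run
# summary whose name contains a given substring, e.g. all logged "loss" metrics.
def _filter_col_example():
    example = pd.DataFrame(columns=['train_loss', 'val_loss', 'accuracy'])
    return filter_col('loss', example)  # -> ['train_loss', 'val_loss']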
def get_df_from_project(project, path: str="wandb"):
"""Load summary and config DataFrame for all runs of a project.
Largely copied from wandb docs.
"""
api = wandb.Api()
runs = api.runs(f'{path}/{project}')
summary_list = []
config_list = []
name_list = []
for run in runs:
# run.summary are the output key/values like accuracy.
# We call ._json_dict to omit large files.
summary_list.append(run.summary._json_dict)
# run.config is the input metrics.
# We remove special values that start with _.
config_list.append(
{k: v for k,v in run.config.items() if not k.startswith('_')})
# run.name is the name of the run.
name_list.append(run.name)
summary_df = pd.DataFrame.from_records(summary_list)
config_df = pd.DataFrame.from_records(config_list)
name_df = pd.DataFrame({'name': name_list})
    df = pd.concat([name_df, config_df, summary_df], axis=1)
    return df
"""
Use GMM clustering to find the optimal grouping of weekdays and save it to --save and --combination_save.
For example, Monday/Wednesday/Friday data is labelled class A, Tuesday/Thursday class B, and weekends/holidays class C.
Example:
5032AB example
::
python3 src/preprocess/optimize.py --input data/processed/train/5032AB.csv\
--save data/processed/label/5032AB.csv --combination_save data/processed/combination/5032AB.json
"""
import argparse
import json
import os
import numpy as np
import pandas as pd
import datetime
import jpholiday
from sklearn.mixture import GaussianMixture
from sklearn.metrics import log_loss
import category_encoders as ce
def encode_day_to_label(df: pd.DataFrame) -> dict:
"""
    Encode each date to a numeric label by weekday and holiday status: [Mon, ..., Sun] = [0, ..., 6], holiday = 7.
Args:
df(pandas DataFrame):Daily data
Returns:
dict:{date\:label}
"""
index_list = df.index.to_list()
dates = list(map(lambda x: datetime.datetime.strptime(
"20" + x, '%Y.%m.%d'), index_list))
label = list(map(lambda x: x.weekday(), dates))
holiday_label = list(map(lambda x: jpholiday.is_holiday(
datetime.date(x.year, x.month, x.day)), dates))
label = [7 if holiday_label[i] else label[i] for i in range(len(label))]
return dict(zip(index_list, label))
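# Usage sketch (illustrative only): the DataFrame index is expected to hold dates as 'YY.MM.DD'
# strings, which the function prefixes with '20' before parsing. Weekdays map to 0-6 and
# Japanese public holidays override the weekday with 7.
def _encode_day_to_label_example():
    example = pd.DataFrame({'kWh': [1.0, 2.0]}, index=['21.01.01', '21.01.02'])
    # 2021-01-01 is New Year's Day (holiday) -> 7; 2021-01-02 is a Saturday -> 5
    return encode_day_to_label(example)  # {'21.01.01': 7, '21.01.02': 5}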
def normalize(df: pd.DataFrame) -> pd.DataFrame:
"""
Min max normalization.
Args:
df(pandas DataFrame):Input data
Returns:
pandas DataFrame:Normalized data
"""
minimum = df.min().min()
maximum = df.max().max()
if (maximum - minimum) == 0:
return df - df
return (df - minimum) / (maximum - minimum)
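# Usage sketch: the normalization is computed over the whole frame, so every value is scaled
# relative to the global minimum and maximum (a constant frame maps to all zeros).
def _normalize_example():
    example = pd.DataFrame({'a': [0.0, 5.0], 'b': [10.0, 2.5]})
    return normalize(example)  # 0.0 -> 0.0, 2.5 -> 0.25, 5.0 -> 0.5, 10.0 -> 1.0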
def gmm_clustering(df: pd.DataFrame, n_clusters: int = 2, seed: int = 0) -> pd.DataFrame:
"""
    Run GMM clustering and return each sample's membership probability for every cluster.
Args:
df(pandas DataFrame):Input data
        n_clusters(int): Number of clusters.
Returns:
        pandas DataFrame: Membership probabilities for each cluster
"""
gmm = GaussianMixture(n_components=n_clusters, max_iter=20, random_state=seed)
gmm.fit(df)
prob = gmm.predict_proba(df)
columns = [str(i) for i in range(0, len(prob[0]))]
    prob_df = pd.DataFrame(prob, columns=columns, index=df.index)
    return prob_df
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import pandas as pd
from fbprophet.diagnostics import performance_metrics
logger = logging.getLogger('fbprophet.plot')
try:
from matplotlib import pyplot as plt
from matplotlib.dates import (
MonthLocator,
num2date,
AutoDateLocator,
AutoDateFormatter,
)
from matplotlib.ticker import FuncFormatter
from pandas.plotting import deregister_matplotlib_converters
deregister_matplotlib_converters()
except ImportError:
logger.error('Importing matplotlib failed. Plotting will not work.')
try:
import plotly.graph_objs as go
from plotly.subplots import make_subplots
except ImportError:
logger.error('Importing plotly failed. Interactive plots will not work.')
def plot(
m, fcst, ax=None, uncertainty=True, plot_cap=True, xlabel='ds', ylabel='y',
figsize=(10, 6)
):
"""Plot the Prophet forecast.
Parameters
----------
m: Prophet model.
fcst: pd.DataFrame output of m.predict.
ax: Optional matplotlib axes on which to plot.
uncertainty: Optional boolean to plot uncertainty intervals, which will
only be done if m.uncertainty_samples > 0.
plot_cap: Optional boolean indicating if the capacity should be shown
in the figure, if available.
xlabel: Optional label name on X-axis
ylabel: Optional label name on Y-axis
figsize: Optional tuple width, height in inches.
Returns
-------
A matplotlib figure.
"""
if ax is None:
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
fcst_t = fcst['ds'].dt.to_pydatetime()
ax.plot(m.history['ds'].dt.to_pydatetime(), m.history['y'], 'k.')
ax.plot(fcst_t, fcst['yhat'], ls='-', c='#0072B2')
if 'cap' in fcst and plot_cap:
ax.plot(fcst_t, fcst['cap'], ls='--', c='k')
if m.logistic_floor and 'floor' in fcst and plot_cap:
ax.plot(fcst_t, fcst['floor'], ls='--', c='k')
if uncertainty and m.uncertainty_samples:
ax.fill_between(fcst_t, fcst['yhat_lower'], fcst['yhat_upper'],
color='#0072B2', alpha=0.2)
# Specify formatting to workaround matplotlib issue #12925
locator = AutoDateLocator(interval_multiples=False)
formatter = AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.tight_layout()
return fig
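def _plot_usage_sketch(history_df):
    """Illustrative only: the usual Prophet workflow that produces the inputs plot() expects.

    `history_df` is assumed to have the standard 'ds' and 'y' columns; the 365-day horizon is
    an arbitrary choice for the sketch.
    """
    from fbprophet import Prophet  # local import to avoid a circular import at module load
    m = Prophet()
    m.fit(history_df)
    future = m.make_future_dataframe(periods=365)
    forecast = m.predict(future)
    return plot(m, forecast, xlabel='date', ylabel='y')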
def plot_components(
m, fcst, uncertainty=True, plot_cap=True, weekly_start=0, yearly_start=0,
figsize=None
):
"""Plot the Prophet forecast components.
Will plot whichever are available of: trend, holidays, weekly
seasonality, yearly seasonality, and additive and multiplicative extra
regressors.
Parameters
----------
m: Prophet model.
fcst: pd.DataFrame output of m.predict.
uncertainty: Optional boolean to plot uncertainty intervals, which will
only be done if m.uncertainty_samples > 0.
plot_cap: Optional boolean indicating if the capacity should be shown
in the figure, if available.
weekly_start: Optional int specifying the start day of the weekly
seasonality plot. 0 (default) starts the week on Sunday. 1 shifts
by 1 day to Monday, and so on.
yearly_start: Optional int specifying the start day of the yearly
seasonality plot. 0 (default) starts the year on Jan 1. 1 shifts
by 1 day to Jan 2, and so on.
figsize: Optional tuple width, height in inches.
Returns
-------
A matplotlib figure.
"""
# Identify components to be plotted
components = ['trend']
if m.train_holiday_names is not None and 'holidays' in fcst:
components.append('holidays')
# Plot weekly seasonality, if present
if 'weekly' in m.seasonalities and 'weekly' in fcst:
components.append('weekly')
# Yearly if present
if 'yearly' in m.seasonalities and 'yearly' in fcst:
components.append('yearly')
# Other seasonalities
components.extend([
name for name in sorted(m.seasonalities)
if name in fcst and name not in ['weekly', 'yearly']
])
regressors = {'additive': False, 'multiplicative': False}
for name, props in m.extra_regressors.items():
regressors[props['mode']] = True
for mode in ['additive', 'multiplicative']:
if regressors[mode] and 'extra_regressors_{}'.format(mode) in fcst:
components.append('extra_regressors_{}'.format(mode))
npanel = len(components)
figsize = figsize if figsize else (9, 3 * npanel)
fig, axes = plt.subplots(npanel, 1, facecolor='w', figsize=figsize)
if npanel == 1:
axes = [axes]
multiplicative_axes = []
dt = m.history['ds'].diff()
min_dt = dt.iloc[dt.values.nonzero()[0]].min()
for ax, plot_name in zip(axes, components):
if plot_name == 'trend':
plot_forecast_component(
m=m, fcst=fcst, name='trend', ax=ax, uncertainty=uncertainty,
plot_cap=plot_cap,
)
elif plot_name in m.seasonalities:
if (
(plot_name == 'weekly' or m.seasonalities[plot_name]['period'] == 7)
and (min_dt == pd.Timedelta(days=1))
):
plot_weekly(
m=m, name=plot_name, ax=ax, uncertainty=uncertainty, weekly_start=weekly_start
)
elif plot_name == 'yearly' or m.seasonalities[plot_name]['period'] == 365.25:
plot_yearly(
m=m, name=plot_name, ax=ax, uncertainty=uncertainty, yearly_start=yearly_start
)
else:
plot_seasonality(
m=m, name=plot_name, ax=ax, uncertainty=uncertainty,
)
elif plot_name in [
'holidays',
'extra_regressors_additive',
'extra_regressors_multiplicative',
]:
plot_forecast_component(
m=m, fcst=fcst, name=plot_name, ax=ax, uncertainty=uncertainty,
plot_cap=False,
)
if plot_name in m.component_modes['multiplicative']:
multiplicative_axes.append(ax)
fig.tight_layout()
# Reset multiplicative axes labels after tight_layout adjustment
for ax in multiplicative_axes:
ax = set_y_as_percent(ax)
return fig
def plot_forecast_component(
m, fcst, name, ax=None, uncertainty=True, plot_cap=False, figsize=(10, 6)
):
"""Plot a particular component of the forecast.
Parameters
----------
m: Prophet model.
fcst: pd.DataFrame output of m.predict.
name: Name of the component to plot.
ax: Optional matplotlib Axes to plot on.
uncertainty: Optional boolean to plot uncertainty intervals, which will
only be done if m.uncertainty_samples > 0.
plot_cap: Optional boolean indicating if the capacity should be shown
in the figure, if available.
figsize: Optional tuple width, height in inches.
Returns
-------
a list of matplotlib artists
"""
artists = []
if not ax:
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.add_subplot(111)
fcst_t = fcst['ds'].dt.to_pydatetime()
artists += ax.plot(fcst_t, fcst[name], ls='-', c='#0072B2')
if 'cap' in fcst and plot_cap:
artists += ax.plot(fcst_t, fcst['cap'], ls='--', c='k')
if m.logistic_floor and 'floor' in fcst and plot_cap:
ax.plot(fcst_t, fcst['floor'], ls='--', c='k')
if uncertainty and m.uncertainty_samples:
artists += [ax.fill_between(
fcst_t, fcst[name + '_lower'], fcst[name + '_upper'],
color='#0072B2', alpha=0.2)]
# Specify formatting to workaround matplotlib issue #12925
locator = AutoDateLocator(interval_multiples=False)
formatter = AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
ax.set_xlabel('ds')
ax.set_ylabel(name)
if name in m.component_modes['multiplicative']:
ax = set_y_as_percent(ax)
return artists
def seasonality_plot_df(m, ds):
"""Prepare dataframe for plotting seasonal components.
Parameters
----------
m: Prophet model.
ds: List of dates for column ds.
Returns
-------
A dataframe with seasonal components on ds.
"""
df_dict = {'ds': ds, 'cap': 1., 'floor': 0.}
for name in m.extra_regressors:
df_dict[name] = 0.
# Activate all conditional seasonality columns
for props in m.seasonalities.values():
if props['condition_name'] is not None:
df_dict[props['condition_name']] = True
    df = pd.DataFrame(df_dict)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 2 22:43:12 2021
@author: obnit
"""
import pandas as pd , matplotlib.pyplot as plt , numpy as np
df = pd.read_csv('Export/tested.csv')
df.Date = pd.to_datetime(df.Date)
import datetime
import locale
import logging
import os
import random
import shutil
import warnings
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import pytest
from freezegun import freeze_time
from ruamel.yaml import YAML
import great_expectations as ge
from great_expectations import DataContext
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.core.usage_statistics.usage_statistics import (
UsageStatisticsHandler,
)
from great_expectations.core.util import get_or_create_spark_application
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.store.profiler_store import ProfilerStore
from great_expectations.data_context.types.base import (
AnonymizedUsageStatisticsConfig,
CheckpointConfig,
DataContextConfig,
GeCloudConfig,
InMemoryStoreBackendDefaults,
)
from great_expectations.data_context.types.resource_identifiers import (
ConfigurationIdentifier,
ExpectationSuiteIdentifier,
GeCloudIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.dataset.pandas_dataset import PandasDataset
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.datasource.data_connector.util import (
get_filesystem_one_level_directory_glob_path_list,
)
from great_expectations.datasource.new_datasource import BaseDatasource, Datasource
from great_expectations.rule_based_profiler.config import RuleBasedProfilerConfig
from great_expectations.rule_based_profiler.config.base import (
ruleBasedProfilerConfigSchema,
)
from great_expectations.rule_based_profiler.types import Domain
from great_expectations.self_check.util import (
build_test_backends_list as build_test_backends_list_v3,
)
from great_expectations.self_check.util import (
expectationSuiteValidationResultSchema,
get_dataset,
)
from great_expectations.util import is_library_loadable
yaml = YAML()
###
#
# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING
#
###
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
logger = logging.getLogger(__name__)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects "
"that require manual inspection.",
)
config.addinivalue_line(
"markers",
"rendered_output: produces rendered output that should be manually reviewed.",
)
config.addinivalue_line(
"markers",
"aws_integration: runs aws integration test that may be very slow and requires credentials",
)
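# Illustrative only (a sketch, not part of the original conftest): a test opts into
# the custom "aws_integration" marker registered above like this; per
# pytest_collection_modifyitems below, such a test is skipped unless
# --aws-integration is passed on the command line.
@pytest.mark.aws_integration
def _example_aws_integration_check():
    assert True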
def pytest_addoption(parser):
# note: --no-spark will be deprecated in favor of --spark
parser.addoption(
"--no-spark",
action="store_true",
help="If set, suppress tests against the spark test suite",
)
parser.addoption(
"--spark",
action="store_true",
help="If set, execute tests against the spark test suite",
)
parser.addoption(
"--no-sqlalchemy",
action="store_true",
help="If set, suppress all tests using sqlalchemy",
)
parser.addoption(
"--postgresql",
action="store_true",
help="If set, execute tests against postgresql",
)
# note: --no-postgresql will be deprecated in favor of --postgresql
parser.addoption(
"--no-postgresql",
action="store_true",
help="If set, supress tests against postgresql",
)
parser.addoption(
"--mysql",
action="store_true",
help="If set, execute tests against mysql",
)
parser.addoption(
"--mssql",
action="store_true",
help="If set, execute tests against mssql",
)
parser.addoption(
"--bigquery",
action="store_true",
help="If set, execute tests against bigquery",
)
parser.addoption(
"--aws",
action="store_true",
help="If set, execute tests against AWS resources like S3, RedShift and Athena",
)
parser.addoption(
"--aws-integration",
action="store_true",
help="If set, run aws integration tests for usage_statistics",
)
parser.addoption(
"--docs-tests",
action="store_true",
help="If set, run integration tests for docs",
)
parser.addoption(
"--performance-tests",
action="store_true",
help="If set, run performance tests (which might also require additional arguments like --bigquery)",
)
def build_test_backends_list(metafunc):
test_backend_names: List[str] = build_test_backends_list_cfe(metafunc)
backend_name_class_name_map: Dict[str, str] = {
"pandas": "PandasDataset",
"spark": "SparkDFDataset",
}
backend_name: str
return [
(backend_name_class_name_map.get(backend_name) or backend_name)
for backend_name in test_backend_names
]
def build_test_backends_list_cfe(metafunc):
# adding deprecation warnings
if metafunc.config.getoption("--no-postgresql"):
warnings.warn(
"--no-sqlalchemy is deprecated as of v0.14 in favor of the --postgresql flag. It will be removed in v0.16. Please adjust your tests accordingly",
DeprecationWarning,
)
if metafunc.config.getoption("--no-spark"):
warnings.warn(
"--no-spark is deprecated as of v0.14 in favor of the --spark flag. It will be removed in v0.16. Please adjust your tests accordingly.",
DeprecationWarning,
)
include_pandas: bool = True
include_spark: bool = metafunc.config.getoption("--spark")
include_sqlalchemy: bool = not metafunc.config.getoption("--no-sqlalchemy")
include_postgresql: bool = metafunc.config.getoption("--postgresql")
include_mysql: bool = metafunc.config.getoption("--mysql")
include_mssql: bool = metafunc.config.getoption("--mssql")
include_bigquery: bool = metafunc.config.getoption("--bigquery")
include_aws: bool = metafunc.config.getoption("--aws")
test_backend_names: List[str] = build_test_backends_list_v3(
include_pandas=include_pandas,
include_spark=include_spark,
include_sqlalchemy=include_sqlalchemy,
include_postgresql=include_postgresql,
include_mysql=include_mysql,
include_mssql=include_mssql,
include_bigquery=include_bigquery,
include_aws=include_aws,
)
return test_backend_names
def pytest_generate_tests(metafunc):
test_backends = build_test_backends_list(metafunc)
if "test_backend" in metafunc.fixturenames:
metafunc.parametrize("test_backend", test_backends, scope="module")
if "test_backends" in metafunc.fixturenames:
metafunc.parametrize("test_backends", [test_backends], scope="module")
def pytest_collection_modifyitems(config, items):
if config.getoption("--aws-integration"):
# --aws-integration given in cli: do not skip aws-integration tests
return
if config.getoption("--docs-tests"):
# --docs-tests given in cli: do not skip documentation integration tests
return
skip_aws_integration = pytest.mark.skip(
reason="need --aws-integration option to run"
)
skip_docs_integration = pytest.mark.skip(reason="need --docs-tests option to run")
for item in items:
if "aws_integration" in item.keywords:
item.add_marker(skip_aws_integration)
if "docs" in item.keywords:
item.add_marker(skip_docs_integration)
@pytest.fixture(autouse=True)
def no_usage_stats(monkeypatch):
# Do not generate usage stats from test runs
monkeypatch.setenv("GE_USAGE_STATS", "False")
@pytest.fixture(scope="module")
def sa(test_backends):
if not any(
[dbms in test_backends for dbms in ["postgresql", "sqlite", "mysql", "mssql"]]
):
pytest.skip("No recognized sqlalchemy backend selected.")
else:
try:
import sqlalchemy as sa
return sa
except ImportError:
raise ValueError("SQL Database tests require sqlalchemy to be installed.")
@pytest.mark.order(index=2)
@pytest.fixture
def spark_session(test_backends):
if "SparkDFDataset" not in test_backends:
pytest.skip("No spark backend selected.")
try:
import pyspark
from pyspark.sql import SparkSession
return get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
except ImportError:
raise ValueError("spark tests are requested, but pyspark is not installed")
@pytest.fixture
def basic_spark_df_execution_engine(spark_session):
from great_expectations.execution_engine import SparkDFExecutionEngine
conf: List[tuple] = spark_session.sparkContext.getConf().getAll()
spark_config: Dict[str, str] = dict(conf)
execution_engine: SparkDFExecutionEngine = SparkDFExecutionEngine(
spark_config=spark_config,
)
return execution_engine
@pytest.mark.order(index=3)
@pytest.fixture
def spark_session_v012(test_backends):
if "SparkDFDataset" not in test_backends:
pytest.skip("No spark backend selected.")
try:
import pyspark
from pyspark.sql import SparkSession
return get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
except ImportError:
raise ValueError("spark tests are requested, but pyspark is not installed")
@pytest.fixture
def basic_expectation_suite(empty_data_context_stats_enabled):
context: DataContext = empty_data_context_stats_enabled
expectation_suite = ExpectationSuite(
expectation_suite_name="default",
meta={},
expectations=[
ExpectationConfiguration(
expectation_type="expect_column_to_exist",
kwargs={"column": "infinities"},
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "nulls"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "naturals"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_unique",
kwargs={"column": "naturals"},
),
],
data_context=context,
)
return expectation_suite
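# Hedged sketch (illustrative, not used by the tests): further expectations could be
# appended to a suite built like the fixture above, assuming the suite's
# add_expectation API; the expectation type and column below are taken from
# elsewhere in this file.
def _example_extend_suite(suite: ExpectationSuite) -> ExpectationSuite:
    suite.add_expectation(
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_not_be_null",
            kwargs={"column": "naturals"},
        )
    )
    return suite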
@pytest.fixture
def numeric_high_card_dict():
# fmt: off
data = {
"norm_0_1": [
0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, -0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648,
-0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468,
-1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751,
-0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282,
-1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483,
-0.7059385234342846, -0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017,
-1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282,
-1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641,
-1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052,
1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, -2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928,
0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793,
1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622,
1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359,
-0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, -0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118,
0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, -0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704,
-1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493,
0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523,
-0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 0.19970388388081273, 0.018242139911433558,
-0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186,
-1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658,
0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915,
-0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884,
-1.8066239592554476, 1.1136979156298865, -0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, -0.08135119186406627, 1.2746921873544188, -0.8255794145095643,
0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486,
-0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683,
0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112,
-1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355,
1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913,
-0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514,
1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106,
-1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, -0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548,
0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197,
-0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077,
0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657,
0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071,
-1.1653209054214695, 1.5560964971092122, 0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875,
-0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942,
1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765,
1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604,
-0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, -1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015,
0.13935419069962593, 1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336,
-0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663,
0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902,
-1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, -1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591,
-1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474,
1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508,
1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872,
-1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641,
-1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345,
0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832,
],
}
# fmt: on
return data
@pytest.fixture
def numeric_high_card_dataset(test_backend, numeric_high_card_dict):
schemas = {
"pandas": {
"norm_0_1": "float64",
},
"postgresql": {
# "norm_0_1": "DOUBLE_PRECISION",
"norm_0_1": "NUMERIC",
},
"sqlite": {
"norm_0_1": "FLOAT",
},
"mysql": {
"norm_0_1": "DOUBLE",
},
"mssql": {
"norm_0_1": "FLOAT",
},
"spark": {
"norm_0_1": "FloatType",
},
}
return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)
@pytest.fixture
def non_numeric_high_card_dataset(test_backend):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
# fmt: off
data = {
"highcardnonnum": [
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW<KEY>", "<KEY>", "<KEY>", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77",
"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "<KEY>", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "<KEY>", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "<KEY>", "<KEY>", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "<KEY>", "<KEY>", "<KEY>",
"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "<KEY>", "<KEY>", "UMdFQ<KEY>", "<KEY>", "<KEY>",
"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "suns0vGgaMzasYpwDEEof2Ktovy0o4os",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "<KEY>", "<KEY>", "<KEY>", "<KEY>QP<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "<KEY>", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "<KEY>", "<KEY>", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "suns0vGgaMzasYpwDEEof2Ktovy0o4os",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
],
        # Built from highcardnonnum using the following (a runnable sketch of this recipe appears after this fixture):
# vals = pd.Series(data["highcardnonnum"])
# sample_vals = vals.sample(n=10, random_state=42)
# weights = np.random.RandomState(42).rand(10)
# weights = weights / np.sum(weights)
# new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)
"medcardnonnum": [
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
"<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>",
],
}
# fmt: on
schemas = {
"pandas": {
"highcardnonnum": "str",
"medcardnonnum": "str",
},
"postgresql": {
"highcardnonnum": "TEXT",
"medcardnonnum": "TEXT",
},
"sqlite": {
"highcardnonnum": "VARCHAR",
"medcardnonnum": "VARCHAR",
},
"mysql": {
"highcardnonnum": "TEXT",
"medcardnonnum": "TEXT",
},
"mssql": {
"highcardnonnum": "VARCHAR",
"medcardnonnum": "VARCHAR",
},
"spark": {
"highcardnonnum": "StringType",
"medcardnonnum": "StringType",
},
}
return get_dataset(test_backend, data, schemas=schemas)
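# Runnable sketch of the sampling recipe described in the "medcardnonnum" comment
# above (an illustrative helper, not used by the fixture itself): derive a
# medium-cardinality sample from a high-cardinality list using fixed random seeds.
def _example_downsample_cardinality(values, n_distinct=10, n_rows=200):
    vals = pd.Series(values)
    sample_vals = vals.sample(n=n_distinct, random_state=42)
    weights = np.random.RandomState(42).rand(n_distinct)
    weights = weights / np.sum(weights)
    return sample_vals.sample(n=n_rows, weights=weights, replace=True, random_state=11)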
def dataset_sample_data(test_backend):
# No infinities for mysql
if test_backend == "mysql":
data = {
# "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
else:
data = {
"infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
schemas = {
"pandas": {"infinities": "float64", "nulls": "float64", "naturals": "float64"},
"postgresql": {
"infinities": "DOUBLE_PRECISION",
"nulls": "DOUBLE_PRECISION",
"naturals": "NUMERIC",
},
"sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
"mysql": {"nulls": "DOUBLE", "naturals": "DOUBLE"},
"mssql": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
"spark": {
"infinities": "FloatType",
"nulls": "FloatType",
"naturals": "FloatType",
},
}
return data, schemas
@pytest.fixture
def dataset(test_backend):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
data, schemas = dataset_sample_data(test_backend)
return get_dataset(test_backend, data, schemas=schemas)
@pytest.fixture
def pandas_dataset():
test_backend = "PandasDataset"
data, schemas = dataset_sample_data(test_backend)
return get_dataset(test_backend, data, schemas=schemas)
@pytest.fixture
def sqlalchemy_dataset(test_backends):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
if "postgresql" in test_backends:
backend = "postgresql"
elif "sqlite" in test_backends:
backend = "sqlite"
else:
return
data = {
"infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
schemas = {
"postgresql": {
"infinities": "DOUBLE_PRECISION",
"nulls": "DOUBLE_PRECISION",
"naturals": "DOUBLE_PRECISION",
},
"sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
}
return get_dataset(backend, data, schemas=schemas, profiler=None)
@pytest.fixture
def sqlitedb_engine(test_backend):
if test_backend == "sqlite":
try:
import sqlalchemy as sa
return sa.create_engine("sqlite://")
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
else:
pytest.skip("Skipping test designed for sqlite on non-sqlite backend.")
@pytest.fixture
def postgresql_engine(test_backend):
if test_backend == "postgresql":
try:
import sqlalchemy as sa
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
engine = sa.create_engine(
f"postgresql://postgres@{db_hostname}/test_ci"
).connect()
yield engine
engine.close()
except ImportError:
raise ValueError("SQL Database tests require sqlalchemy to be installed.")
else:
pytest.skip("Skipping test designed for postgresql on non-postgresql backend.")
@pytest.fixture(scope="function")
def empty_data_context(
tmp_path,
) -> DataContext:
project_path = tmp_path / "empty_data_context"
project_path.mkdir()
project_path = str(project_path)
context = ge.data_context.DataContext.create(project_path)
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
os.makedirs(asset_config_path, exist_ok=True)
assert context.list_datasources() == []
return context
@pytest.fixture
def titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled(
tmp_path_factory,
monkeypatch,
):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path: str = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data", "titanic")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"great_expectations_v013_no_datasource_stats_enabled.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(
os.path.join(
context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(
os.path.join(context_path, "..", "data", "titanic", "Titanic_19120414_1313")
),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_basic_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {data_path}
default_regex:
pattern: (.*)\\.csv
group_names:
- data_asset_name
my_special_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users:
base_directory: {data_path}
pattern: (.+)_(\\d+)_(\\d+)\\.csv
group_names:
- name
- timestamp
- size
my_other_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users: {{}}
my_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- pipeline_stage_name
- airflow_run_id
"""
# noinspection PyUnusedLocal
datasource: Datasource = context.test_yaml_config(
name="my_datasource", yaml_config=datasource_config, pretty_print=False
)
# noinspection PyProtectedMember
context._save_project_config()
return context
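# Illustrative only (not part of the fixture): how the "users" asset pattern declared
# above splits one of the copied filenames into its group_names (name, timestamp, size).
def _example_users_asset_regex():
    import re
    match = re.match(r"(.+)_(\d+)_(\d+)\.csv", "Titanic_19120414_1313.csv")
    # -> {'name': 'Titanic', 'timestamp': '19120414', 'size': '1313'}
    return dict(zip(("name", "timestamp", "size"), match.groups()))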
@pytest.fixture
def titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
tmp_path_factory,
monkeypatch,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
project_dir: str = context.root_directory
data_path: str = os.path.join(project_dir, "..", "data", "titanic")
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_additional_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {data_path}
default_regex:
pattern: (.*)\\.csv
group_names:
- data_asset_name
"""
# noinspection PyUnusedLocal
datasource: BaseDatasource = context.add_datasource(
"my_additional_datasource", **yaml.load(datasource_config)
)
return context
@pytest.fixture
def titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(
sa,
spark_session,
titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
tmp_path_factory,
test_backends,
monkeypatch,
):
context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
project_dir: str = context.root_directory
data_path: str = os.path.join(project_dir, "..", "data", "titanic")
if (
any(
[
dbms in test_backends
for dbms in ["postgresql", "sqlite", "mysql", "mssql"]
]
)
and (sa is not None)
and is_library_loadable(library_name="sqlalchemy")
):
db_fixture_file_path: str = file_relative_path(
__file__,
os.path.join("test_sets", "titanic_sql_test_cases.db"),
)
db_file_path: str = os.path.join(
data_path,
"titanic_sql_test_cases.db",
)
shutil.copy(
db_fixture_file_path,
db_file_path,
)
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: sqlite:///{db_file_path}
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
name: whole_table
"""
# noinspection PyUnusedLocal
datasource: BaseDatasource = context.add_datasource(
"my_sqlite_db_datasource", **yaml.load(datasource_config)
)
return context
@pytest.fixture
def deterministic_asset_dataconnector_context(
tmp_path_factory,
monkeypatch,
):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data", "titanic")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
"./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml",
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(
os.path.join(
context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
)
context = ge.data_context.DataContext(context_path)
assert context.root_directory == context_path
datasource_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_other_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users: {{}}
"""
context.test_yaml_config(
name="my_datasource", yaml_config=datasource_config, pretty_print=False
)
# noinspection PyProtectedMember
context._save_project_config()
return context
@pytest.fixture
def titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
# add simple template config
simple_checkpoint_template_config: CheckpointConfig = CheckpointConfig(
name="my_simple_template_checkpoint",
config_version=1,
run_name_template="%Y-%M-foo-bar-template-$VAR",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
},
runtime_configuration={
"result_format": {
"result_format": "BASIC",
"partial_unexpected_count": 20,
}
},
)
simple_checkpoint_template_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_template_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_template_config_key,
value=simple_checkpoint_template_config,
)
# add nested template configs
nested_checkpoint_template_config_1: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_1",
config_version=1,
run_name_template="%Y-%M-foo-bar-template-$VAR",
expectation_suite_name="suite_from_template_1",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
],
evaluation_parameters={
"environment": "FOO",
"tolerance": "FOOBOO",
"aux_param_0": "FOOBARBOO",
"aux_param_1": "FOOBARBOO",
"template_1_key": 456,
},
runtime_configuration={
"result_format": "FOOBARBOO",
"partial_unexpected_count": "FOOBARBOO",
"template_1_key": 123,
},
validations=[
{
"batch_request": {
"datasource_name": "my_datasource_template_1",
"data_connector_name": "my_special_data_connector_template_1",
"data_asset_name": "users_from_template_1",
"data_connector_query": {"partition_index": -999},
}
}
],
)
nested_checkpoint_template_config_1_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_1.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_1_key,
value=nested_checkpoint_template_config_1,
)
nested_checkpoint_template_config_2: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_2",
config_version=1,
template_name="my_nested_checkpoint_template_1",
run_name_template="%Y-%M-foo-bar-template-$VAR-template-2",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "MyCustomStoreEvaluationParametersActionTemplate2",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
{
"name": "new_action_from_template_2",
"action": {"class_name": "Template2SpecialAction"},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
},
runtime_configuration={
"result_format": "BASIC",
"partial_unexpected_count": 20,
},
)
nested_checkpoint_template_config_2_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_2.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_2_key,
value=nested_checkpoint_template_config_2,
)
nested_checkpoint_template_config_3: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_3",
config_version=1,
template_name="my_nested_checkpoint_template_2",
run_name_template="%Y-%M-foo-bar-template-$VAR-template-3",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "MyCustomStoreEvaluationParametersActionTemplate3",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
{
"name": "new_action_from_template_3",
"action": {"class_name": "Template3SpecialAction"},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
"template_3_key": 123,
},
runtime_configuration={
"result_format": "BASIC",
"partial_unexpected_count": 20,
"template_3_key": "bloopy!",
},
)
nested_checkpoint_template_config_3_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_3.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_3_key,
value=nested_checkpoint_template_config_3,
)
# add minimal SimpleCheckpoint
simple_checkpoint_config: CheckpointConfig = CheckpointConfig(
name="my_minimal_simple_checkpoint",
class_name="SimpleCheckpoint",
config_version=1,
)
simple_checkpoint_config_key: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=simple_checkpoint_config.name
)
context.checkpoint_store.set(
key=simple_checkpoint_config_key,
value=simple_checkpoint_config,
)
# add SimpleCheckpoint with slack webhook
simple_checkpoint_with_slack_webhook_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_slack",
class_name="SimpleCheckpoint",
config_version=1,
slack_webhook="https://hooks.slack.com/foo/bar",
)
simple_checkpoint_with_slack_webhook_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_slack_webhook_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_with_slack_webhook_config_key,
value=simple_checkpoint_with_slack_webhook_config,
)
# add SimpleCheckpoint with slack webhook and notify_with
simple_checkpoint_with_slack_webhook_and_notify_with_all_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_slack_and_notify_with_all",
class_name="SimpleCheckpoint",
config_version=1,
slack_webhook="https://hooks.slack.com/foo/bar",
notify_with="all",
)
simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name
)
context.checkpoint_store.set(
key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key,
value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config,
)
# add SimpleCheckpoint with site_names
simple_checkpoint_with_site_names_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_site_names",
class_name="SimpleCheckpoint",
config_version=1,
site_names=["local_site"],
)
simple_checkpoint_with_site_names_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_site_names_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_with_site_names_config_key,
value=simple_checkpoint_with_site_names_config,
)
# noinspection PyProtectedMember
context._save_project_config()
return context
@pytest.fixture
def empty_context_with_checkpoint(empty_data_context):
context = empty_data_context
root_dir = empty_data_context.root_directory
fixture_name = "my_checkpoint.yml"
fixture_path = file_relative_path(
__file__, f"./data_context/fixtures/contexts/{fixture_name}"
)
checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name)
shutil.copy(fixture_path, checkpoints_file)
assert os.path.isfile(checkpoints_file)
return context
@pytest.fixture
def empty_data_context_stats_enabled(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS", raising=False)
project_path = str(tmp_path_factory.mktemp("empty_data_context"))
context = ge.data_context.DataContext.create(project_path)
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
os.makedirs(asset_config_path, exist_ok=True)
return context
@pytest.fixture
def titanic_data_context(
tmp_path_factory,
) -> DataContext:
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory):
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_no_data_docs(tmp_path_factory):
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic_no_data_docs.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled_config_version_2(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled_config_version_3(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_upgraded_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_sqlite_db(sa):
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
titanic_db_path = file_relative_path(__file__, "./test_sets/titanic.db")
engine = create_engine(f"sqlite:///{titanic_db_path}")
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return engine
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
def titanic_sqlite_db_connection_string(sa):
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
titanic_db_path = file_relative_path(__file__, "./test_sets/titanic.db")
engine = create_engine(f"sqlite:////{titanic_db_path}")
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return f"sqlite:///{titanic_db_path}"
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
def titanic_expectation_suite(empty_data_context_stats_enabled):
data_context: DataContext = empty_data_context_stats_enabled
return ExpectationSuite(
expectation_suite_name="Titanic.warning",
meta={},
data_asset_type="Dataset",
expectations=[
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "PClass"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "Name"},
),
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 1313},
),
],
data_context=data_context,
)
@pytest.fixture
def empty_sqlite_db(sa):
"""An empty in-memory sqlite db that always gets run."""
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
assert engine.execute("select 1").fetchall()[0] == (1,)
return engine
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
@freeze_time("09/26/2019 13:42:41")
def site_builder_data_context_with_html_store_titanic_random(
tmp_path_factory, filesystem_csv_3
):
base_dir = str(tmp_path_factory.mktemp("project_dir"))
project_dir = os.path.join(base_dir, "project_path")
os.mkdir(project_dir)
os.makedirs(os.path.join(project_dir, "data"))
os.makedirs(os.path.join(project_dir, "data/titanic"))
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")),
)
os.makedirs(os.path.join(project_dir, "data", "random"))
shutil.copy(
os.path.join(filesystem_csv_3, "f1.csv"),
str(os.path.join(project_dir, "data", "random", "f1.csv")),
)
shutil.copy(
os.path.join(filesystem_csv_3, "f2.csv"),
str(os.path.join(project_dir, "data", "random", "f2.csv")),
)
ge.data_context.DataContext.create(project_dir)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/great_expectations_site_builder.yml"
),
str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")),
)
context = ge.data_context.DataContext(
context_root_dir=os.path.join(project_dir, "great_expectations")
)
context.add_datasource(
"titanic",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "titanic"),
}
},
)
context.add_datasource(
"random",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "random"),
}
},
)
context.profile_datasource("titanic")
context.profile_datasource("random")
context.profile_datasource(context.list_datasources()[0]["name"])
context._project_config.anonymous_usage_statistics = {
"enabled": True,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
return context
@pytest.fixture(scope="function")
@freeze_time("09/26/2019 13:42:41")
def site_builder_data_context_v013_with_html_store_titanic_random(
tmp_path, filesystem_csv_3
):
base_dir = tmp_path / "project_dir"
base_dir.mkdir()
base_dir = str(base_dir)
project_dir = os.path.join(base_dir, "project_path")
os.mkdir(project_dir)
os.makedirs(os.path.join(project_dir, "data"))
os.makedirs(os.path.join(project_dir, "data", "titanic"))
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")),
)
os.makedirs(os.path.join(project_dir, "data", "random"))
shutil.copy(
os.path.join(filesystem_csv_3, "f1.csv"),
str(os.path.join(project_dir, "data", "random", "f1.csv")),
)
shutil.copy(
os.path.join(filesystem_csv_3, "f2.csv"),
str(os.path.join(project_dir, "data", "random", "f2.csv")),
)
ge.data_context.DataContext.create(project_dir)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_site_builder.yml"
),
str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")),
)
context = ge.data_context.DataContext(
context_root_dir=os.path.join(project_dir, "great_expectations")
)
context.add_datasource(
"titanic",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "titanic"),
}
},
)
context.add_datasource(
"random",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "random"),
}
},
)
context.profile_datasource("titanic")
context.profile_datasource("random")
context.profile_datasource(context.list_datasources()[0]["name"])
context._project_config.anonymous_usage_statistics = {
"enabled": True,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
return context
@pytest.fixture
def v20_project_directory(tmp_path_factory):
"""
GE config_version: 2 project for testing upgrade helper
"""
project_path = str(tmp_path_factory.mktemp("v20_project"))
context_root_dir = os.path.join(project_path, "great_expectations")
shutil.copytree(
file_relative_path(
__file__, "./test_fixtures/upgrade_helper/great_expectations_v20_project/"
),
context_root_dir,
)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/upgrade_helper/great_expectations_v2.yml"
),
os.path.join(context_root_dir, "great_expectations.yml"),
)
return context_root_dir
@pytest.fixture
def data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"expectation_suites/parameterized_expectation_suite_fixture.json",
),
os.path.join(asset_config_path, "my_dag_node", "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def data_context_parameterized_expectation_suite(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_v013_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"expectation_suites/parameterized_expectation_suite_fixture.json",
),
os.path.join(asset_config_path, "my_dag_node", "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def data_context_simple_expectation_suite(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"rendering_fixtures/expectations_suite_1.json",
),
os.path.join(asset_config_path, "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture()
def filesystem_csv_data_context_with_validation_operators(
titanic_data_context_stats_enabled, filesystem_csv_2
):
titanic_data_context_stats_enabled.add_datasource(
"rad_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(filesystem_csv_2),
}
},
)
return titanic_data_context_stats_enabled
@pytest.fixture()
def filesystem_csv_data_context(
empty_data_context,
filesystem_csv_2,
) -> DataContext:
empty_data_context.add_datasource(
"rad_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(filesystem_csv_2),
}
},
)
return empty_data_context
@pytest.fixture
def filesystem_csv(tmp_path_factory):
base_dir = tmp_path_factory.mktemp("filesystem_csv")
base_dir = str(base_dir)
# Put a few files in the directory
with open(os.path.join(base_dir, "f1.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
with open(os.path.join(base_dir, "f2.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
os.makedirs(os.path.join(base_dir, "f3"), exist_ok=True)
with open(os.path.join(base_dir, "f3", "f3_20190101.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
with open(os.path.join(base_dir, "f3", "f3_20190102.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
return base_dir
@pytest.fixture(scope="function")
def filesystem_csv_2(tmp_path):
base_dir = tmp_path / "filesystem_csv_2"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset({"x": [1, 2, 3]})
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False)
assert os.path.isabs(base_dir)
assert os.path.isfile(os.path.join(base_dir, "f1.csv"))
return base_dir
@pytest.fixture(scope="function")
def filesystem_csv_3(tmp_path):
base_dir = tmp_path / "filesystem_csv_3"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset({"x": [1, 2, 3]})
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False)
toy_dataset_2 = PandasDataset({"y": [1, 2, 3]})
toy_dataset_2.to_csv(os.path.join(base_dir, "f2.csv"), index=False)
return base_dir
@pytest.fixture(scope="function")
def filesystem_csv_4(tmp_path):
base_dir = tmp_path / "filesystem_csv_4"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset(
{
"x": [1, 2, 3],
"y": [1, 2, 3],
}
)
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=None)
return base_dir
@pytest.fixture
def titanic_profiled_evrs_1():
with open(
file_relative_path(
__file__, "./render/fixtures/BasicDatasetProfiler_evrs.json"
),
) as infile:
return expectationSuiteValidationResultSchema.loads(infile.read())
# various types of evr
@pytest.fixture
def evr_failed():
return ExpectationValidationResult(
success=False,
result={
"element_count": 1313,
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_count": 3,
"unexpected_percent": 0.2284843869002285,
"unexpected_percent_nonmissing": 0.2284843869002285,
"partial_unexpected_list": [
"Daly, Mr <NAME> ",
"Barber, Ms ",
"Geiger, <NAME> ",
],
"partial_unexpected_index_list": [77, 289, 303],
"partial_unexpected_counts": [
{"value": "Barber, Ms ", "count": 1},
{"value": "Daly, Mr <NAME> ", "count": 1},
{"value": "Geiger, <NAME> ", "count": 1},
],
},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_values_to_not_match_regex",
kwargs={
"column": "Name",
"regex": "^\\s+|\\s+$",
"result_format": "SUMMARY",
},
),
)
@pytest.fixture
def evr_success():
return ExpectationValidationResult(
success=True,
result={"observed_value": 1313},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_table_row_count_to_be_between",
kwargs={"min_value": 0, "max_value": None, "result_format": "SUMMARY"},
),
)
@pytest.fixture
def sqlite_view_engine(test_backends):
# Create a small in-memory engine with two views, one of which is temporary
if "sqlite" in test_backends:
try:
import sqlalchemy as sa
sqlite_engine = sa.create_engine("sqlite://")
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
df.to_sql(name="test_table", con=sqlite_engine, index=True)
sqlite_engine.execute(
"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;"
)
sqlite_engine.execute(
"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;"
)
return sqlite_engine
except ImportError:
sa = None
else:
pytest.skip("SqlAlchemy tests disabled; not testing views")
@pytest.fixture
def expectation_suite_identifier():
return ExpectationSuiteIdentifier("my.expectation.suite.name")
@pytest.fixture
def basic_sqlalchemy_datasource(sqlitedb_engine):
return SqlAlchemyDatasource("basic_sqlalchemy_datasource", engine=sqlitedb_engine)
@pytest.fixture
def test_folder_connection_path_csv(tmp_path_factory):
    df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
"""This file contains functions and tests to calculate the stylized facts"""
import pandas as pd
import numpy as np
from math import isclose
from stockmarket.functions import div0
# return autocorrelation close to zero after lag 1
# calculate returns
def calculate_close(orderbook_transaction_price_history):
closing_prices = []
for day in orderbook_transaction_price_history:
closing_prices.append(day[-1])
close = pd.Series(closing_prices).pct_change()
return close
def calculate_returns(orderbook_transaction_price_history):
"""Return the returns"""
closing_prices = []
for day in orderbook_transaction_price_history:
closing_prices.append(day[-1])
returns = pd.Series(closing_prices).pct_change()
return returns[1:]
# Test 1
def zero_autocorrelation(returns, lags):
"""returns wether average autocorrelation is much different from zero"""
autocorr_returns = [returns.autocorr(lag=lag) for lag in range(lags)]
# if mean autocorrelation are between -0.1 and 0.1
average_autocorrelation = np.mean(autocorr_returns[1:])
if (average_autocorrelation < 0.1) and (average_autocorrelation > -0.1):
return True, average_autocorrelation
else:
return False, np.inf
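# Hedged usage sketch (not part of the original module): the helper below shows how
# Test 1 could be exercised on synthetic i.i.d. returns, for which the average
# autocorrelation should land close to zero. The toy data and seed are assumptions.
def _example_zero_autocorrelation_check():
    rng = np.random.RandomState(42)
    toy_returns = pd.Series(rng.normal(loc=0.0, scale=0.01, size=250))
    passed, average_autocorrelation = zero_autocorrelation(toy_returns, lags=10)
    return passed, average_autocorrelation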
# # Test 2
# def fat_tails(returns):
# results = powerlaw.Fit(returns)
# alpha = results.power_law.alpha
# #print(alpha)
# if (alpha < 5) and (alpha > 3):
# return True, alpha
# else:
# return False, np.inf
def fat_tails_kurtosis(returns):
series_returns = pd.Series(returns)
kurt = series_returns.kurtosis()
if kurt > 4:
return True, kurt
else:
return False, np.inf
# Test 3
def clustered_volatility(returns, lags):
absolute_returns = returns.abs()
autocorr_abs_returns = [absolute_returns.autocorr(lag=lag) for lag in range(lags)]
average_autocorrelation = np.mean(autocorr_abs_returns[1:])
#print(average_autocorrelation)
if (average_autocorrelation < 0.1) and (average_autocorrelation > -0.1):
return False, np.inf
else:
return True, average_autocorrelation
# Test 4
def long_memory(returns, hurst_function, lag1, lag2):
h = hurst_function(returns, lag1, lag2)
#print('h = ', h)
return not isclose(0.5, h, abs_tol=(10 ** -1 / 2)), h
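# Hedged sketch of a `hurst_function` that could be passed to `long_memory` above
# (an assumption, not the estimator used in the original project): it cumulates the
# returns into a price-like series and fits the classic scaling law
# std(x[t+lag] - x[t]) ~ lag**H on a log-log scale. Assumes lag1 >= 1 and lag2 > lag1.
def _example_hurst(returns, lag1, lag2):
    series = np.cumsum(np.asarray(returns, dtype=float))
    lags = range(lag1, lag2)
    tau = [np.std(series[lag:] - series[:-lag]) for lag in lags]
    slope, _ = np.polyfit(np.log(list(lags)), np.log(tau), 1)
    # for a pure random walk the fitted slope (Hurst exponent) is close to 0.5
    return slope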
# functions to calculate stylized facts
def autocorrelation_returns(returns, lags):
"""
Calculate the average autocorrelation in a returns time series
:param returns: time series of returns
:param lags: the lags over which the autocorrelation is to be calculated
:return: average autocorrelation
"""
returns = pd.Series(returns)
autocorr_returns = [returns.autocorr(lag=lag) for lag in range(lags)]
average_autocorrelation = np.mean(autocorr_returns[1:])
return average_autocorrelation
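# Hedged usage sketch (assumption): autocorrelation_returns accepts any 1-D sequence
# of returns and reports the mean autocorrelation over lags 1..lags-1.
def _example_autocorrelation_returns():
    rng = np.random.RandomState(0)
    toy_returns = rng.normal(loc=0.0, scale=0.01, size=250)
    return autocorrelation_returns(toy_returns, lags=10)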
def kurtosis(returns):
"""
Calculates the kurtosis in a time series of returns
:param returns: time series of returns
:return: kurtosis
"""
    series_returns = pd.Series(returns)
    kurt = series_returns.kurtosis()
    return kurt
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
import plotly.graph_objects as go
from datetime import datetime
from datetime import timedelta
import glob
import requests
from app import db
from app.models import *
from app.plots import bp
import pandas as pd
import io
from app.api import vis
from sqlalchemy import sql
import numpy as np
from app.tools.curvefit.core.model import CurveModel
from app.tools.curvefit.core.functions import gaussian_cdf, gaussian_pdf
PHU = {'the_district_of_algoma':'The District of Algoma Health Unit',
'brant_county':'Brant County Health Unit',
'durham_regional':'Durham Regional Health Unit',
'grey_bruce':'Grey Bruce Health Unit',
'haldimand_norfolk':'Haldimand-Norfolk Health Unit',
'haliburton_kawartha_pine_ridge_district':'Haliburton, Kawartha, Pine Ridge District Health Unit',
'halton_regional':'Halton Regional Health Unit',
'city_of_hamilton':'City of Hamilton Health Unit',
'hastings_and_prince_edward_counties':'Hastings and Prince Edward Counties Health Unit',
'huron_county':'Huron County Health Unit',
'chatham_kent':'Chatham-Kent Health Unit',
'kingston_frontenac_and_lennox_and_addington':'Kingston, Frontenac, and Lennox and Addington Health Unit',
'lambton':'Lambton Health Unit',
'leeds_grenville_and_lanark_district':'Leeds, Grenville and Lanark District Health Unit',
'middlesex_london':'Middlesex-London Health Unit',
'niagara_regional_area':'Niagara Regional Area Health Unit',
'north_bay_parry_sound_district':'North Bay Parry Sound District Health Unit',
'northwestern':'Northwestern Health Unit',
'city_of_ottawa':'City of Ottawa Health Unit',
'peel_regional':'Peel Regional Health Unit',
'perth_district':'Perth District Health Unit',
'peterborough_county_city':'Peterborough County–City Health Unit',
'porcupine':'Porcupine Health Unit',
'renfrew_county_and_district':'Renfrew County and District Health Unit',
'the_eastern_ontario':'The Eastern Ontario Health Unit',
'simcoe_muskoka_district':'Simcoe Muskoka District Health Unit',
'sudbury_and_district':'Sudbury and District Health Unit',
'thunder_bay_district':'Thunder Bay District Health Unit',
'timiskaming':'Timiskaming Health Unit',
'waterloo':'Waterloo Health Unit',
'wellington_dufferin_guelph':'Wellington-Dufferin-Guelph Health Unit',
'windsor_essex_county':'Windsor-Essex County Health Unit',
'york_regional':'York Regional Health Unit',
'southwestern':'Southwestern Public Health Unit',
'city_of_toronto':'City of Toronto Health Unit',
'huron_perth_county':'Huron Perth Public Health Unit'}
def get_dir(data, today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + data['stage'] + '/'
load_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_name = data['table_name'] + '_' + today + '.' + data['type']
file_path = load_dir + '/' + file_name
return load_dir, file_path
def get_file(data):
load_dir, file_path = get_dir(data)
files = glob.glob(load_dir + "/*." + data['type'])
files = [file.split('_')[-1] for file in files]
files = [file.split('.csv')[0] for file in files]
dates = [datetime.strptime(file, '%Y-%m-%d') for file in files]
max_date = max(dates).strftime('%Y-%m-%d')
load_dir, file_path = get_dir(data, max_date)
return file_path
## Tests
def new_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New tests'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New tests'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New tests'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New tests'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Tests<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="new tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Total tested'].tail(1).values[0],
number = {'font': {'size': 60}},
))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Total tested'],line=dict(color='#5E5AA1',dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['Total tested'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Total tested'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Tested<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def tested_positve_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New Positive pct'].notna()]
temp = df.loc[df['New Positive pct'] > 0]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New Positive pct'].tail(1).values[0]*100,
number = {'font': {'size': 60}}
))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'],line=dict(color='#FFF', dash='dot'),visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New Positive pct'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New Positive pct'].iloc[-2]*100,
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text': f"Percent Positivity<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tested positive").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def under_investigation_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Under Investigation'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['Under Investigation'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Under Investigation'].iloc[-2],
'increasing': {'color':'grey'},
'decreasing': {'color':'grey'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Under Investigation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="under investigation").first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Hospital
def in_hospital_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Hospitalized'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Hospitalized'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Hospitalized'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Hospitalized'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In Hospital<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="in hospital", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def in_icu_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['ICU'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['ICU'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['ICU'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'title' : {"text": f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>"},
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':"",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def on_ventilator_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':False},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive_ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive_ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive_ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Cases
def new_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New positives'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['New positives'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
else:
df = vis.get_phus()
df = df.loc[df.region == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="new cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="new cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def active_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df['Active'].notna()]
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Active'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Active'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Active'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Active'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Active Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
div = fig.to_json()
p = Viz.query.filter_by(header="active cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_cases_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Positives'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Positives'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Positives'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Positives'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_phus()
df = df.loc[df.region == PHU[region]]
df['value'] = df.value.cumsum()
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
print(region)
p = Viz.query.filter_by(header="cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def recovered_plot(region='ontario'):
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Resolved'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Resolved'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Resolved'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Resolved'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Recovered Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
div = fig.to_json()
p = Viz.query.filter_by(header="recovered", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_deaths_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
        df['Date'] = pd.to_datetime(df['Date'])
# install pattern
# install gensim
# install nltk
# install pyspellchecker
import re
import pandas as pd
import numpy as np
import gensim
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import wordnet
from spellchecker import SpellChecker
class Cleaning:
def __init__(self):
self.WORDS = {}
return
# remove urls (starts with https, http)
def remove_URL(self, col):
text = col.tolist()
TEXT=[]
for word in text:
if pd.isnull(word):
TEXT.append(word)
else:
TEXT.append(re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', str(word)))
        se = pd.Series(TEXT)
        return se
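# Hedged usage sketch (assumption, not part of the original file): applying the URL
# cleaner to a small pandas Series of raw text, including a null entry.
def _example_remove_url():
    cleaner = Cleaning()
    texts = pd.Series(["see https://example.com/page for details", None, "no url here"])
    return cleaner.remove_URL(texts)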
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
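# Note (assumption about the vectorbt accessor): `vbt.tile` repeats the single price
# column into a wide DataFrame (3 and 1000 copies here), so the same price path can be
# exercised across many columns in the tests below.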
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
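# Hedged illustration (assumption): the NaN-tolerant comparison above treats
# (np.nan, 1.0) and (np.nan, 1.0) as equal, whereas plain tuple equality would not,
# because np.nan != np.nan.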
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
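    # A worked check of the fee/slippage math the asserts below rely on: buying at price 10 with 10%
    # slippage fills at 11; after the 1.0 fixed fee, 99 of cash affords 99 / (11 * 1.1) ≈ 8.1818 shares
    # (90 of order value + 10 total fees = 100 cash spent). Selling mirrors it: fill 10 * 0.9 = 9,
    # proceeds 90, fees 0.1 * 90 + 1 = 10, so cash grows by 80.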
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
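    # The cases below exercise the size types: TargetShares/TargetValue/TargetPercent size the order so
    # the resulting position matches the requested share count, currency value, or fraction of current
    # value, while Percent uses the given fraction of whatever is available in the order's direction.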
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
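    # With log=True the full order context (current cash/shares/valuation plus every order field) and
    # the post-order state and result are written into log_record, as the field-by-field asserts show.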
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = | pd.Series([False, False, True, True, True], index=price.index) | pandas.Series |
import requests
import pandas as pd
# The systems list on GitHub provides auto-discovery URLs, which give access to all endpoints
feedlist = pd.read_csv("gbfs/systems.csv")
# Check for valid feeds, keep responses
validfeeds = []
for x in feedlist['Auto-Discovery URL']:
try:
t = requests.get(x, timeout=2)
validfeeds.append(t.json())
    except Exception:
        print("failed: " + x)
# De-duplicate feeds by choosing either 1) en if multiple languages provided or
# 2) using 'feeds' as provided
feedslist = []
for x in validfeeds:
if "en" in x["data"]:
feedslist.append(x["data"]["en"]["feeds"])
elif "nb" in x["data"]:
feedslist.append(x["data"]["nb"]["feeds"])
else:
try:
feedslist.append(x["data"]["feeds"])
        except Exception:
            pass
#unnest url structure, make into tuples for later dataframe creation
urls = []
for f in feedslist:
for g in f:
urls.append((g["name"], g["url"]))
# create dataframe and export endpoints
gbfs_df = | pd.DataFrame(urls, columns=["feedtype", "feedurl"]) | pandas.DataFrame |
import logging
import experiments.logging_setup
from pathlib import Path
import math
import numpy as np
import pandas as pd
import yaml
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.utils.data_utils import Sequence
import tensorflow as tf
class CSVIssueClassesGenerator(Sequence):
def __init__(
self,
vectorizer: Path,
corpus_path: Path,
ngfi_csv: Path,
gfi_csv: Path,
batch_size=4,
val_split=0.66,
validation_data=False,
random_state=420,
):
"""
I am the data generator which loads issues noted down in corresponding csv files.
:param vectorizer: path to the vectorizer model
:param corpus_path: path to the corpus
:param ngfi_csv: path to csv file for non good first issues
:param gfi_csv: path to csv file for good first issues
:param batch_size:
:param val_split: how much of the data should be for validation (fraction btw. 0-1)
:param validation_data: flag to disable validation data altogether (useful for final training)
:param random_state: int as my random state (so that the validation and train data split the same way)
"""
gfi_paths = | pd.read_csv(gfi_csv) | pandas.read_csv |
import pandas as pd, numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import math
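# a*tanh(b*x + c) + d traces an S-shaped (sigmoid-like) curve, a simple phenomenological fit for
# cumulative counts such as the per-state totals expected in us-states.csv.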
def model(x, a, b, c, d):
return a*np.tanh(b*x+c)+d
df = | pd.read_csv("us-states.csv") | pandas.read_csv |
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
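        # Load a CSV fixture from the test data path, optionally dropping duplicate (time, ticker)
        # rows (keeping the last) before parsing the time column.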
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data('asof.csv')
assert_frame_equal(result, expected)
result = merge_asof(self.trades, q,
on='time',
by='ticker',
check_duplicates=False)
expected = self.read_data('asof.csv')
expected = pd.concat([expected, expected]).sort_values(
['time', 'ticker']).reset_index(drop=True)
# the results are not ordered in a meaningful way
# nor are the exact matches duplicated, so comparisons
# are pretty tricky here, however the uniques are the same
def aligner(x, ticker):
return (x[x.ticker == ticker]
.sort_values(['time', 'ticker', 'quantity', 'price',
'marketCenter', 'bid', 'ask'])
.drop_duplicates(keep='last')
.reset_index(drop=True)
)
for ticker in expected.ticker.unique():
r = aligner(result, ticker)
e = aligner(expected, ticker)
assert_frame_equal(r, e)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 3, 3],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key', check_duplicates=False)
expected = pd.DataFrame({'key': [1, 1, 3, 3],
'left_val': [1, 2, 3, 3],
'right_val': [1, 1, 2, 3]})
assert_frame_equal(result, expected)
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 2, 2],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key')
expected = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3],
'right_val': [1, 1, 3]})
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
allow_exact_matches='foo')
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1s'))
# integer
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1)
# incompat
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=1)
# invalid
with self.assertRaises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1.0)
# invalid negative
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=-Timedelta('1s'))
with self.assertRaises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=-1)
def test_non_sorted(self):
trades = self.trades.sort_values('time', ascending=False)
quotes = self.quotes.sort_values('time', ascending=False)
# we require that we are already sorted on time & quotes
self.assertFalse(trades.time.is_monotonic)
self.assertFalse(quotes.time.is_monotonic)
with self.assertRaises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
trades = self.trades.sort_values('time')
self.assertTrue(trades.time.is_monotonic)
self.assertFalse(quotes.time.is_monotonic)
with self.assertRaises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
quotes = self.quotes.sort_values('time')
self.assertTrue(trades.time.is_monotonic)
self.assertTrue(quotes.time.is_monotonic)
# ok, though has dupes
merge_asof(trades, self.quotes,
on='time',
by='ticker')
def test_tolerance(self):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1day'))
expected = self.tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
allow_exact_matches=False)
expected = self.allow_exact_matches
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
tolerance=Timedelta('100ms'),
allow_exact_matches=False)
expected = self.allow_exact_matches_and_tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time')
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [2]})
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# Adapted from
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/tensor.py
# and
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/arrow_conversion.py
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications:
# - Added ArrowTensorType.to_pandas_type()
# - Added ArrowTensorArray.__getitem__()
# - Added ArrowTensorArray.__iter__()
# - Added support for column casts to extension types.
# - Fleshed out docstrings and examples.
# - Fixed TensorArray.isna() so it returns an appropriate ExtensionArray.
# - Added different (more vectorized) TensorArray.take() operation.
# - Added support for more reducers (agg funcs) to TensorArray.
# - Added support for logical operators to TensorArray(Element).
# - Miscellaneous small bug fixes and optimizations.
from collections.abc import Iterable
import numbers
from typing import Sequence, Any, Union, Tuple, Optional, Callable
import numpy as np
import pandas as pd
from pandas._typing import Dtype
from pandas.compat import set_function_name
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
try:
from pandas.core.dtypes.generic import ABCIndex
except ImportError:
# ABCIndexClass changed to ABCIndex in Pandas 1.3
from pandas.core.dtypes.generic import ABCIndexClass as ABCIndex
from pandas.core.indexers import check_array_indexer, validate_indices
import pyarrow as pa
from ray.util.annotations import PublicAPI
# -----------------------------------------------------------------------------
# Pandas extension type and array
# -----------------------------------------------------------------------------
@PublicAPI(stability="beta")
@pd.api.extensions.register_extension_dtype
class TensorDtype(pd.api.extensions.ExtensionDtype):
"""
Pandas extension type for a column of fixed-shape, homogeneous-typed
tensors.
See:
https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
for up-to-date interface documentation and the subclassing contract. The
docstrings of the below properties and methods were copied from the base
ExtensionDtype.
Examples:
>>> # Create a DataFrame with a list of ndarrays as a column.
>>> df = pd.DataFrame({
"one": [1, 2, 3],
"two": list(np.arange(24).reshape((3, 2, 2, 2)))})
>>> # Note the opaque np.object dtype for this column.
>>> df.dtypes
one int64
two object
dtype: object
>>> # Cast column to our TensorDtype extension type.
>>> df["two"] = df["two"].astype(TensorDtype())
>>> # Note that the column dtype is now TensorDtype instead of
>>> # np.object.
>>> df.dtypes
one int64
two TensorDtype
dtype: object
>>> # Pandas is now aware of this tensor column, and we can do the
>>> # typical DataFrame operations on this column.
>>> col = 2 * (df["two"] + 10)
>>> # The ndarrays underlying the tensor column will be manipulated,
>>> # but the column itself will continue to be a Pandas type.
>>> type(col)
pandas.core.series.Series
>>> col
0 [[[ 2 4]
[ 6 8]]
[[10 12]
[14 16]]]
1 [[[18 20]
[22 24]]
[[26 28]
[30 32]]]
2 [[[34 36]
[38 40]]
[[42 44]
[46 48]]]
Name: two, dtype: TensorDtype
>>> # Once you do an aggregation on that column that returns a single
>>> # row's value, you get back our TensorArrayElement type.
>>> tensor = col.mean()
>>> type(tensor)
ray.data.extensions.tensor_extension.TensorArrayElement
>>> tensor
array([[[18., 20.],
[22., 24.]],
[[26., 28.],
[30., 32.]]])
>>> # This is a light wrapper around a NumPy ndarray, and can easily
>>> # be converted to an ndarray.
>>> type(tensor.to_numpy())
numpy.ndarray
>>> # In addition to doing Pandas operations on the tensor column,
>>> # you can now put the DataFrame into a Dataset.
>>> ds = ray.data.from_pandas(df)
>>> # Internally, this column is represented the corresponding
>>> # Arrow tensor extension type.
>>> ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> # You can write the dataset to Parquet.
>>> ds.write_parquet("/some/path")
>>> # And you can read it back.
>>> read_ds = ray.data.read_parquet("/some/path")
>>> read_ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> read_df = ray.get(read_ds.to_pandas_refs())[0]
>>> read_df.dtypes
one int64
two TensorDtype
dtype: object
>>> # The tensor extension type is preserved along the
>>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
>>> # conversion chain.
>>> read_df.equals(df)
True
"""
# NOTE(Clark): This is apparently required to prevent integer indexing
# errors, but is an undocumented ExtensionDtype attribute. See issue:
# https://github.com/CODAIT/text-extensions-for-pandas/issues/166
base = None
@property
def type(self):
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
return TensorArrayElement
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
return "TensorDtype"
@classmethod
def construct_from_string(cls, string: str):
"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# Upstream code uses exceptions as part of its normal control flow and
# will pass this method bogus class names.
if string == cls.__name__:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
@classmethod
def construct_array_type(cls):
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return TensorArray
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
"""
Convert a pyarrow (chunked) array to a TensorArray.
This and TensorArray.__arrow_array__ make up the
Pandas extension type + array <--> Arrow extension type + array
interoperability protocol. See
https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
for more information.
"""
if isinstance(array, pa.ChunkedArray):
if array.num_chunks > 1:
# TODO(Clark): Remove concat and construct from list with
# shape.
values = np.concatenate(
[chunk.to_numpy() for chunk in array.iterchunks()]
)
else:
values = array.chunk(0).to_numpy()
else:
values = array.to_numpy()
return TensorArray(values)
class TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
"""
Mixin for TensorArray operator support, applying operations on the
underlying ndarrays.
"""
@classmethod
def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
"""
Add support for binary operators by unwrapping, applying, and
rewrapping.
"""
# NOTE(Clark): This overrides, but coerce_to_dtype, result_dtype might
# not be needed
def _binop(self, other):
lvalues = self._tensor
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndex)):
# Rely on Pandas to unbox and dispatch to us.
return NotImplemented
# divmod returns a tuple
if op_name in ["__divmod__", "__rdivmod__"]:
# TODO(Clark): Add support for divmod and rdivmod.
# div, mod = result
raise NotImplementedError
if isinstance(other, (TensorArray, TensorArrayElement)):
rvalues = other._tensor
else:
rvalues = other
result = op(lvalues, rvalues)
# Force a TensorArray if rvalue is not a scalar.
if isinstance(self, TensorArrayElement) and (
not isinstance(other, TensorArrayElement) or not np.isscalar(other)
):
result_wrapped = TensorArray(result)
else:
result_wrapped = cls(result)
return result_wrapped
op_name = f"__{op.__name__}__"
return set_function_name(_binop, op_name, cls)
@classmethod
def _create_logical_method(cls, op):
return cls._create_method(op)
class TensorArrayElement(TensorOpsMixin):
"""
Single element of a TensorArray, wrapping an underlying ndarray.
"""
def __init__(self, values: np.ndarray):
"""
Construct a TensorArrayElement from a NumPy ndarray.
Args:
values: ndarray that underlies this TensorArray element.
"""
self._tensor = values
def __repr__(self):
return self._tensor.__repr__()
def __str__(self):
return self._tensor.__str__()
def to_numpy(self):
"""
Return the values of this element as a NumPy ndarray.
"""
return np.asarray(self._tensor)
def __array__(self):
return np.asarray(self._tensor)
@PublicAPI(stability="beta")
class TensorArray(pd.api.extensions.ExtensionArray, TensorOpsMixin):
"""
Pandas `ExtensionArray` representing a tensor column, i.e. a column
consisting of ndarrays as elements. All tensors in a column must have the
same shape.
Examples:
>>> # Create a DataFrame with a list of ndarrays as a column.
>>> df = pd.DataFrame({
"one": [1, 2, 3],
"two": TensorArray(np.arange(24).reshape((3, 2, 2, 2)))})
>>> # Note that the column dtype is TensorDtype.
>>> df.dtypes
one int64
two TensorDtype
dtype: object
>>> # Pandas is aware of this tensor column, and we can do the
>>> # typical DataFrame operations on this column.
>>> col = 2 * (df["two"] + 10)
>>> # The ndarrays underlying the tensor column will be manipulated,
>>> # but the column itself will continue to be a Pandas type.
>>> type(col)
pandas.core.series.Series
>>> col
0 [[[ 2 4]
[ 6 8]]
[[10 12]
[14 16]]]
1 [[[18 20]
[22 24]]
[[26 28]
[30 32]]]
2 [[[34 36]
[38 40]]
[[42 44]
[46 48]]]
Name: two, dtype: TensorDtype
>>> # Once you do an aggregation on that column that returns a single
>>> # row's value, you get back our TensorArrayElement type.
>>> tensor = col.mean()
>>> type(tensor)
ray.data.extensions.tensor_extension.TensorArrayElement
>>> tensor
array([[[18., 20.],
[22., 24.]],
[[26., 28.],
[30., 32.]]])
>>> # This is a light wrapper around a NumPy ndarray, and can easily
>>> # be converted to an ndarray.
>>> type(tensor.to_numpy())
numpy.ndarray
>>> # In addition to doing Pandas operations on the tensor column,
>>> # you can now put the DataFrame into a Dataset.
>>> ds = ray.data.from_pandas(df)
>>> # Internally, this column is represented the corresponding
>>> # Arrow tensor extension type.
>>> ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> # You can write the dataset to Parquet.
>>> ds.write_parquet("/some/path")
>>> # And you can read it back.
>>> read_ds = ray.data.read_parquet("/some/path")
>>> read_ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> read_df = ray.get(read_ds.to_pandas_refs())[0]
>>> read_df.dtypes
one int64
two TensorDtype
dtype: object
>>> # The tensor extension type is preserved along the
>>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
>>> # conversion chain.
>>> read_df.equals(df)
True
"""
SUPPORTED_REDUCERS = {
"sum": np.sum,
"all": np.all,
"any": np.any,
"min": np.min,
"max": np.max,
"mean": np.mean,
"median": np.median,
"prod": np.prod,
"std": np.std,
"var": np.var,
}
# See https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py # noqa
# for interface documentation and the subclassing contract.
def __init__(
self,
values: Union[
np.ndarray,
ABCSeries,
Sequence[Union[np.ndarray, TensorArrayElement]],
TensorArrayElement,
Any,
],
):
"""
Args:
values: A NumPy ndarray or sequence of NumPy ndarrays of equal
shape.
"""
if isinstance(values, ABCSeries):
# Convert series to ndarray and passthrough to ndarray handling
# logic.
values = values.to_numpy()
if isinstance(values, np.ndarray):
if (
values.dtype.type is np.object_
and len(values) > 0
and isinstance(values[0], (np.ndarray, TensorArrayElement))
):
# Convert ndarrays of ndarrays/TensorArrayElements
# with an opaque object type to a properly typed ndarray of
# ndarrays.
self._tensor = np.array([np.asarray(v) for v in values])
else:
self._tensor = values
elif isinstance(values, Sequence):
if len(values) == 0:
self._tensor = np.array([])
else:
self._tensor = np.stack([np.asarray(v) for v in values], axis=0)
elif isinstance(values, TensorArrayElement):
self._tensor = np.array([np.asarray(values)])
elif np.isscalar(values):
# `values` is a single element:
self._tensor = np.array([values])
elif isinstance(values, TensorArray):
raise TypeError("Use the copy() method to create a copy of a TensorArray")
else:
raise TypeError(
"Expected a numpy.ndarray or sequence of numpy.ndarray, "
f"but received {values} "
f"of type '{type(values)}' instead."
)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
):
"""
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type`` or be converted into this type in this
method.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
if copy and isinstance(scalars, np.ndarray):
scalars = scalars.copy()
elif isinstance(scalars, TensorArray):
scalars = scalars._tensor.copy() if copy else scalars._tensor
return TensorArray(scalars)
@classmethod
def _from_factorized(
cls, values: np.ndarray, original: pd.api.extensions.ExtensionArray
):
"""
Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
factorize : Top-level factorize method that dispatches here.
ExtensionArray.factorize : Encode the extension array as an enumerated
type.
"""
raise NotImplementedError
def __getitem__(
self, item: Union[int, slice, np.ndarray]
) -> Union["TensorArray", "TensorArrayElement"]:
"""
Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
# Return scalar if single value is selected, a TensorArrayElement for
# single array element, or TensorArray for slice.
if isinstance(item, int):
value = self._tensor[item]
if np.isscalar(value):
return value
else:
return TensorArrayElement(value)
else:
# BEGIN workaround for Pandas issue #42430
if isinstance(item, tuple) and len(item) > 1 and item[0] == Ellipsis:
if len(item) > 2:
# Hopefully this case is not possible, but can't be sure
raise ValueError(
"Workaround Pandas issue #42430 not "
"implemented for tuple length > 2"
)
item = item[1]
# END workaround for issue #42430
if isinstance(item, TensorArray):
item = np.asarray(item)
item = | check_array_indexer(self, item) | pandas.core.indexers.check_array_indexer |
# Code to compute the proper values (emyield, marketcap, cacl, etc.) and supply a string key and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
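        # i=-1 selects the most recent quarter present in the frame; more negative offsets step further back.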
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
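        # Sum `key` over `length` consecutive quarters ending at offset `seed`; None if any quarter is missing.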
values=[]
        #BIG BUG, this was originally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
def get_netdebt(self,statements_df,seed=-1):
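        # Net debt = short-term debt + long-term debt + capital leases, less cash-like items
        # (cash, restricted cash, fed funds sold, interest-bearing deposits, time deposits placed).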
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
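        # Earnings yield proxy: trailing EBIT divided by enterprise value.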
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
#length1=how far back to go, how many quarters to get 20 quarters
#length2=for each quarter, how far back to go 4 quarters
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls=pd.Series(ltcacls)
if pd.isnull(ltcacls).any():
return None
return stats.gmean(1+ltcacls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_longterm_tatl(self,df,seed=-1,length=20):
lttatls=[]
for i in range(seed,seed-length,-1):
lttatls.append(self.get_tatl(df,i))
lttatls=pd.Series(lttatls)
if pd.isnull(lttatls).any():
return None
return stats.gmean(1+lttatls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_capex(self,df,seed=-1,length=4):
purchaseofplantpropertyandequipment=self.get_sum_quarters(df,'purchaseofplantpropertyandequipment',seed,length)
saleofplantpropertyandequipment=self.get_sum_quarters(df,'saleofplantpropertyandequipment',seed,length)
s=pd.Series([purchaseofplantpropertyandequipment,saleofplantpropertyandequipment])
if pd.isnull(s).all():
return None
m=pd.Series([-1,-1])
capex=(s*m).sum()
if capex is None:
return None
return float(capex)
def get_freecashflow(self,df,seed=-1):
netcashfromoperatingactivities=self.get_value(df,'netcashfromoperatingactivities',seed)
capex=self.get_capex(df,seed,length=1)
s=pd.Series([netcashfromoperatingactivities,capex])
if pd.isnull(s).all():
return None
m=pd.Series([1,-1])
fcf=(s*m).sum()
return float(fcf)
    #add a length2 parameter so we take the sums of cash flows
def get_cashflowonassets(self,df,seed=-1,length1=20,length2=4):
cfoas=[]
for i in range(seed,seed-length1,-1):
start_assets=self.get_value(df,'totalassets',i-length2)
end_assets=self.get_value(df,'totalassets',i)
fcfs=[]
for k in range(i,i-length2,-1):
fcf=self.get_freecashflow(df,k)
fcfs.append(fcf)
if pd.isnull(fcfs).any():
return None
total_fcf=pd.Series(fcfs).sum()
avg_assets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([total_fcf,avg_assets]).any() or avg_assets==0:
return None
else:
cfoas.append(total_fcf/avg_assets)
if pd.isnull(cfoas).any():
return None
else:
if pd.isnull(stats.gmean(1+pd.Series(cfoas))-1):
return None
else:
return stats.gmean(1+pd.Series(cfoas))-1 #we want to punish variability because the higher number the better
def get_roa(self,df,seed=-1,length=4):
netincome=self.get_sum_quarters(df,'netincome',seed,length)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincome,totalassets]).any() or totalassets==0:
return None
roa=netincome/totalassets
return float(roa)
def get_roc(self,df,seed=-1,length=4):
ebit=self.get_ebit(df,seed,length)
dividends=self.get_sum_quarters(df,'paymentofdividends',seed,length)
start_debt=self.get_netdebt(df,seed-length)
end_debt=self.get_netdebt(df,seed)
netdebt=pd.Series([start_debt,end_debt]).mean()
start_equity=self.get_value(df,'totalequity',seed-length)
end_equity=self.get_value(df,'totalequity',seed)
totalequity=pd.Series([start_equity,end_equity]).mean()
num=pd.Series([ebit,dividends]).sum()
den=pd.Series([netdebt,totalequity]).sum()
if pd.isnull([num,den]).any() or den==0:
return None
else:
roc=(float(num/den))
return float(roc)
def get_longtermroa(self,df,seed=-1,length1=20,length2=4):
roas=[]
for i in range(seed,seed-length1,-1):
roas.append(self.get_roa(df,i,length2))
if pd.isnull(roas).any():
return None
longtermroagmean=stats.gmean(1+pd.Series(roas))-1
if pd.isnull(longtermroagmean):
return None
return float(longtermroagmean)
def get_longtermroc(self,df,seed=-1,length1=20,length2=4):
rocs=[]
for i in range(seed,seed-length1,-1):
rocs.append(self.get_roc(df,i,length2))
rocs=pd.Series(rocs)
if pd.isnull(rocs).any():
return None
roc=stats.gmean(1+rocs)-1
if pd.isnull(roc):
return None
return float(roc)
def get_momentum(self,df,period=relativedelta(months=11)):
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
pctchange=1+pctchange
pctchange=pctchange.tolist()
gain=np.prod(pctchange)
return float(gain-1)
def get_fip(self,df,period=relativedelta(years=1)):
orig_df=df.copy()
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
if len(pctchange)==0:
return None
updays=(pctchange>0).sum()
downdays=(pctchange<0).sum()
fip=float(downdays)/float(len(pctchange))-float(updays)/float(len(pctchange))
if self.get_momentum(orig_df)<0:
fip=-1*fip
return fip #the lower the better
def get_balance_sheet_mean_value(self,df,tag,seed=-1,length=1):
start=self.get_value(df,tag,seed-length)
end=self.get_value(df,tag,seed)
if pd.isnull(pd.Series([start,end])).any() or start==0 or end==0:
return None
average=pd.Series([start,end]).mean()
if pd.isnull(average):
return None
else:
return float(average)
def get_dsri(self,df,seed1=-1,seed2=-5,length=4):
#seed1 and 2 are the quarters we are comparing between
#dsri=(ttmsdfcompany.iloc[-1]['accountsreceivable']/ttmsdfcompany.iloc[-1]['totalrevenue'])/(ttmsdfcompany.iloc[-5]['accountsreceivable']/ttmsdfcompany.iloc[-5]['totalrevenue'])
#accountsreceivable1=self.get_value(cik,'balance_sheet','accountsreceivable',seed1)
#accountsreceivable2=self.get_value(cik,'balance_sheet','accountsreceivable',seed2)
accountsreceivable1=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed1,length)
accountsreceivable2=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([accountsreceivable1,accountsreceivable2,totalrevenue1,totalrevenue2]).any() or totalrevenue1==0 or totalrevenue2==0:
return None
num=accountsreceivable1/totalrevenue1
den=accountsreceivable2/totalrevenue2
if den==0:
return None
dsri=num/den
return float(dsri)
def get_gmi(self,df,seed1=-1,seed2=-5,length=4):
#gmi=((ttmsdfcompany.iloc[-5]['totalrevenue']-ttmsdfcompany.iloc[-5]['totalcostofrevenue'])/ttmsdfcompany.iloc[-5]['totalrevenue'])/((ttmsdfcompany.iloc[-1]['totalrevenue']-ttmsdfcompany.iloc[-1]['totalcostofrevenue'])/ttmsdfcompany.iloc[-1]['totalrevenue'])
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
totalcostofrevenue1=self.get_sum_quarters(df,'totalcostofrevenue',seed1,length)
totalcostofrevenue2=self.get_sum_quarters(df,'totalcostofrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2,totalcostofrevenue1,totalcostofrevenue2]).any():
return None
if totalrevenue2==0 or totalrevenue1==0:
return None
num=(totalrevenue2-totalcostofrevenue2)/totalrevenue2
den=(totalrevenue1-totalcostofrevenue1)/totalrevenue1
gmi=num/den
if den==0:
return None
return float(gmi)
def get_aqi(self,df,seed1=-1,seed2=-5):
#https://www.oldschoolvalue.com/blog/investment-tools/beneish-earnings-manipulation-m-score/
#otherlta1=companydf.iloc[-1]['totalassets']-(companydf.iloc[-1]['totalcurrentassets']+companydf.iloc[-1]['netppe'])
#otherlta2=companydf.iloc[-5]['totalassets']-(companydf.iloc[-5]['totalcurrentassets']+companydf.iloc[-5]['netppe'])
# aqi=(otherlta1/companydf.iloc[-1]['totalassets'])/(otherlta2/companydf.iloc[-5]['totalassets'])
netppe1=self.get_value(df,'netppe',seed1)
netppe2=self.get_value(df,'netppe',seed2)
totalassets1=self.get_value(df,'totalassets',seed1)
totalassets2=self.get_value(df,'totalassets',seed2)
totalcurrentassets1=self.get_value(df,'totalcurrentassets',seed1)
totalcurrentassets2=self.get_value(df,'totalcurrentassets',seed2)
if pd.isnull([netppe1,netppe2,totalassets1,totalassets2,totalcurrentassets1,totalcurrentassets2]).any():
return None
a=totalassets1-totalcurrentassets1-netppe1
b=totalassets2-totalcurrentassets2-netppe2
if totalassets1==0 or totalassets2==0:
return None
num=a/totalassets1
den=b/totalassets2
if den==0:
return None
aqi=num/den
return float(aqi)
def get_sgi(self,df,seed1=-1,seed2=-5,length=4):
#sgi=ttmsdfcompany.iloc[-1]['totalrevenue']/ttmsdfcompany.iloc[-5]['totalrevenue']
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2]).any():
return None
if totalrevenue2==0:
return None
sgi=totalrevenue1/totalrevenue2
return float(sgi)
def get_depi(self,df,seed1=-1,seed2=-5,length=4):
#depit=ttmsdfcompany.iloc[-1]['depreciationexpense']/(ttmsdfcompany.iloc[-1]['depreciationexpense']+ttmsdfcompany.iloc[-1]['netppe'])
#depit1=ttmsdfcompany.iloc[-5]['depreciationexpense']/(ttmsdfcompany.iloc[-5]['depreciationexpense']+ttmsdfcompany.iloc[-5]['netppe'])
#depi=depit1/depit
depreciationexpense1=self.get_sum_quarters(df,'depreciationexpense',seed1,length)
depreciationexpense2=self.get_sum_quarters(df,'depreciationexpense',seed2,length)
netppe1=self.get_balance_sheet_mean_value(df, 'netppe', seed1,length)
netppe2=self.get_balance_sheet_mean_value(df, 'netppe', seed2,length)
if pd.isnull([depreciationexpense1,depreciationexpense2,netppe1,netppe2]).any():
return None
num=depreciationexpense2/(depreciationexpense2+netppe2)
den=depreciationexpense1/(depreciationexpense1+netppe1)
if den==0:
return None
depi=num/den
return float(depi)
def get_sgai(self,df,seed1=-1,seed2=-5,length=4):
#sgait=ttmsdfcompany.iloc[-1]['sgaexpense']/ttmsdfcompany.iloc[-1]['totalrevenue']
#sgait1=ttmsdfcompany.iloc[-5]['sgaexpense']/ttmsdfcompany.iloc[-5]['totalrevenue']
#sgai=sgait/sgait1
sgaexpense1=self.get_sum_quarters(df,'sgaexpense',seed1,length)
sgaexpense2=self.get_sum_quarters(df,'sgaexpense',seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([sgaexpense1,sgaexpense2,totalrevenue1,totalrevenue2]).any():
return None
if totalrevenue1==0 or totalrevenue2==0:
return None
num=sgaexpense1/totalrevenue1
den=sgaexpense2/totalrevenue2
if den==0:
return None
sgai=num/den
return float(sgai)
def get_lvgi(self,df,seed1=-1,seed2=-5):
"""
lvgit=(companydf.iloc[-1]['longtermdebt']+companydf.iloc[-1]['totalcurrentliabilities'])/companydf.iloc[-1]['totalassets']
lvgit1=(companydf.iloc[-5]['longtermdebt']+companydf.iloc[-5]['totalcurrentliabilities'])/companydf.iloc[-5]['totalassets']
lvgi=lvgit/lvgit1
"""
longtermdebt1=self.get_value(df,'longtermdebt',seed1)
longtermdebt2=self.get_value(df,'longtermdebt',seed2)
shorttermdebt1=self.get_value(df,'shorttermdebt',seed1)
shorttermdebt2=self.get_value(df,'shorttermdebt',seed2)
totalassets1=self.get_value(df,'totalassets',seed1)
totalassets2=self.get_value(df,'totalassets',seed2)
if pd.isnull([longtermdebt1,longtermdebt2,shorttermdebt1,shorttermdebt2,totalassets1,totalassets2]).any() or totalassets1==0 or totalassets2==0:
return None
num=(longtermdebt1+shorttermdebt1)/totalassets1
den=(longtermdebt2+shorttermdebt2)/totalassets2
if den==0:
return None
lvgi=num/den
return float(lvgi)
def get_tata(self,df,seed=-1,length=4):
#tata=(ttmsdfcompany.iloc[-1]['netincomecontinuing']-ttmsdfcompany.iloc[-1]['netcashfromoperatingactivities'])/ttmsdfcompany.iloc[-1]['totalassets']
netincomecontinuing=self.get_sum_quarters(df,'netincomecontinuing',seed,length)
        netcashfromoperatingactivities=self.get_sum_quarters(df,'netcashfromoperatingactivities',seed,length)
#totalassets=self.get_value(cik,'balance_sheet','totalassets',seed)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any() or start_assets==0 or end_assets==0:
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincomecontinuing,totalassets,netcashfromoperatingactivities]).any() or totalassets==0:
return None
tata=(netincomecontinuing-netcashfromoperatingactivities)/totalassets
return float(tata)
def get_probm(self,df,seed1=-1,seed2=-5,length=4):
#probmarray=[-4.84,.92*dsri,.528*gmi,.404*aqi,.892*sgi,.115*depi,-1*.172*sgai,-1*.327*lvgi,4.697*tata]
#https://www.oldschoolvalue.com/blog/investment-tools/beneish-earnings-manipulation-m-score/
dsri=self.get_dsri(df,seed1,seed2,length)
gmi=self.get_gmi(df,seed1,seed2,length)
aqi=self.get_aqi(df,seed1,seed2)
sgi=self.get_sgi(df,seed1,seed2,length)
depi=self.get_depi(df,seed1,seed2,length)
sgai=self.get_sgai(df,seed1,seed2,length)
lvgi=self.get_lvgi(df,seed1,seed2)
tata=self.get_tata(df,seed1,length)
probmarray=[dsri,gmi,aqi,sgi,depi,sgai,lvgi,tata]
if pd.isnull(probmarray).all():
return None
m=[.92,.528,.404,.892,.115,-.172,-.327,4.697]
s= | pd.Series(probmarray) | pandas.Series |
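# --- Editor's note: illustrative sketch, not part of the original sample ---
# The method above gathers the eight Beneish M-score components and the
# weight vector m; the -4.84 intercept comes from the comment linked above.
# A self-contained version of the final combination (NaN components simply
# drop out of the weighted sum, as pandas' sum does) could look like this.
import pandas as pd

def beneish_m_score(dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata):
    components = pd.Series(
        [dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata],
        index=['dsri', 'gmi', 'aqi', 'sgi', 'depi', 'sgai', 'lvgi', 'tata'],
        dtype=float)
    weights = pd.Series(
        [0.92, 0.528, 0.404, 0.892, 0.115, -0.172, -0.327, 4.697],
        index=components.index)
    # missing components contribute nothing to the weighted sum
    return -4.84 + components.multiply(weights).sum()

# Index ratios near 1 and small accruals give a score below the commonly
# cited -2.22 manipulation threshold (about -2.39 here).
print(beneish_m_score(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.02))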
import os
import pandas as pd
import utils
import datetime
import time
def create_monitor(file_name_monitor):
list_columns_order = [
'dt_start', 'project_name', 'project_path',
'1_start_auth', '2_auto_zip', '3_auto_report', '4_reencode_auth',
'5_auto_reencode', '6_auto_join', '7_timestamp', '8_auto_send_auth',
'9_uploaded',
'dt_upload', 'chat_link']
df_monitor = pd.DataFrame(columns=list_columns_order)
df_monitor.to_csv(file_name_monitor, index=False)
def ensure_exists_monitor(file_name_monitor):
exist = os.path.exists(file_name_monitor)
if not exist:
create_monitor(file_name_monitor)
def check_folders_auth(list_folder_path):
list_ = []
for folder_path in list_folder_path:
folder_name = os.path.basename(folder_path)
if folder_name[0] == '_':
list_.append(folder_path)
return list_
def get_list_folder_path_start_auth(folder_path_start):
exist = os.path.exists(folder_path_start)
if exist is False:
os.mkdir(folder_path_start)
return []
list_folder_name = os.listdir(folder_path_start)
list_folder_path = []
for folder_name in list_folder_name:
folder_path = os.path.join(folder_path_start, folder_name)
list_folder_path.append(folder_path)
list_folder_path_start_auth = \
check_folders_auth(list_folder_path)
return list_folder_path_start_auth
def check_project_in_monitor(folder_path_project: str, file_path_monitor: str):
"""verifies that folder_path are in the monitor
Args:
folder_path_project (str):
file_path_monitor (str):
Returns:
bolean: true if folder_path exist in monitor
"""
df = | pd.read_csv(file_path_monitor) | pandas.read_csv |
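# --- Editor's note: hedged sketch, not part of the original script ---
# The body of check_project_in_monitor is truncated by the dataset format.
# One plausible membership test, using the 'project_path' column defined in
# create_monitor above (this completion is an assumption):
import pandas as pd

def project_in_monitor(folder_path_project: str, file_path_monitor: str) -> bool:
    df_monitor = pd.read_csv(file_path_monitor)
    return folder_path_project in df_monitor['project_path'].astype(str).values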
import logging
import hashlib
import pandas as pd
import typing
import wikifier
import os
import traceback
import requests
from ast import literal_eval
from io import StringIO
from datamart_isi.cache.wikidata_cache import QueryCache
from datamart_isi.config import cache_file_storage_base_loc
from datamart_isi.utilities import connection
from datamart_isi.utilities.utils import Utils
from datamart_isi.utilities.d3m_wikifier import check_has_q_node_columns
from d3m.container import DataFrame as d3m_DataFrame
WIKIDATA_CACHE_MANAGER = QueryCache()
WIKIDATA_SERVER = connection.get_wikidata_server_url()
_logger = logging.getLogger(__name__)
class MaterializerCache(object):
@staticmethod
def materialize(metadata, run_wikifier=True) -> typing.Union[pd.DataFrame, bytes]:
# general type materializer
if 'url' in metadata:
loaded_data = MaterializerCache.get_data(metadata=metadata)
file_type = metadata.get("file_type") or ""
if "csv" in file_type:
has_q_nodes = check_has_q_node_columns(loaded_data)
if has_q_nodes:
_logger.warning("The original data already has Q nodes! Will not run wikifier")
else:
if run_wikifier:
loaded_data = wikifier.produce(loaded_data)
return loaded_data
elif "p_nodes_needed" in metadata:
# wikidata materializer
return MaterializerCache.materialize_for_wikidata(metadata)
else:
raise ValueError("Unknown type for materialize!")
@staticmethod
def materialize_for_wikidata(metadata: dict) -> d3m_DataFrame:
"""
        this function is currently only used to get the first 10 rows
:param metadata: dict
:return: materialized dataframe
"""
show_item_label = metadata.get("show_item_label", True)
if show_item_label:
label_part = " ?itemLabel \n"
else:
label_part = " ?item \n"
where_part = ""
length = metadata.get("length", 100)
column_name_suffix = metadata.get("suffix_col_name", "")
for i, each_p_node in enumerate(metadata["p_nodes_needed"]):
label_part += " ?value" + str(i) + "Label\n"
where_part += " ?item wdt:" + each_p_node + " ?value" + str(i) + ".\n"
sparql_query = """PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX wd: <http://www.wikidata.org/entity/>
prefix bd: <http://www.bigdata.com/rdf#>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
SELECT \n""" + label_part + "WHERE \n {\n" + where_part \
+ """ SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }\n}\n""" \
+ "LIMIT " + str(length)
results = WIKIDATA_CACHE_MANAGER.get_result(sparql_query)
all_res = {}
for i, result in enumerate(results):
each_res = {}
for each_key in result.keys():
each_res[each_key] = result[each_key]['value']
all_res[i] = each_res
df_res = pd.DataFrame.from_dict(all_res, "index")
column_names = df_res.columns.tolist()
column_names = column_names[1:]
column_names_replaced = {"itemLabel" if show_item_label else "item": "q_node"}
for each in zip(column_names, metadata["p_nodes_needed"]):
column_names_replaced[each[0]] = Utils.get_node_name(each[1]) + column_name_suffix
df_res.rename(columns=column_names_replaced, inplace=True)
# change to correct order
df_res_cols = df_res.columns.tolist()
df_res_cols = df_res_cols[1:] + [df_res_cols[0]]
df_res = df_res[df_res_cols]
df_res = d3m_DataFrame(df_res, generate_metadata=True)
return df_res
@staticmethod
def materialize_for_wikitable(dataset_url: str, file_type: str, extra_information: str) -> pd.DataFrame:
from datamart_isi.materializers.wikitables_materializer import WikitablesMaterializer
materializer = WikitablesMaterializer()
loaded_data = materializer.get_one(dataset_url, extra_information['xpath'])
return loaded_data
@staticmethod
def materialize_for_csv(dataset_url: str, file_type: str) -> pd.DataFrame:
from datamart_isi.materializers.general_materializer import GeneralMaterializer
general_materializer = GeneralMaterializer()
file_metadata = {
"materialization": {
"arguments": {
"url": dataset_url,
"file_type": file_type
}
}
}
try:
result = general_materializer.get(metadata=file_metadata).to_csv(index=False)
# remove last \n so that we will not get an extra useless row
if result[-1] == "\n":
result = result[:-1]
loaded_data = StringIO(result)
loaded_data = pd.read_csv(loaded_data, dtype="str")
return loaded_data
except:
traceback.print_exc()
raise ValueError("Materializing from " + dataset_url + " failed!")
@staticmethod
def get_data(metadata) -> pd.DataFrame:
"""
Main function for get the data through cache
:param metadata:
:return:
"""
dataset_url = metadata['url']['value']
# updated v2019.10.14: add local storage cache file
hash_generator = hashlib.md5()
hash_generator.update(dataset_url.encode('utf-8'))
hash_url_key = hash_generator.hexdigest()
dataset_cache_loc = os.path.join(cache_file_storage_base_loc, "datasets_cache", hash_url_key)
_logger.debug("Try to check whether cache file exist or not at " + dataset_cache_loc)
if os.path.exists(dataset_cache_loc + ".h5"):
_logger.info("Found exist cached dataset file in h5 format.")
loaded_data = | pd.read_hdf(dataset_cache_loc + ".h5") | pandas.read_hdf |
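# --- Editor's note: illustrative sketch, not part of the original module ---
# get_data above keys the local dataset cache by the MD5 digest of the URL
# and stores it as <hash>.h5. The key/path scheme in isolation (the base
# directory name here is an assumption):
import hashlib
import os

def cache_path_for_url(url: str, base_dir: str = "datasets_cache") -> str:
    key = hashlib.md5(url.encode("utf-8")).hexdigest()
    return os.path.join(base_dir, key + ".h5")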
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **TD DSA 2021 by <NAME> - report by <NAME>**
# ------------------------- -------------------------------------
# # One Last Attempt: BERT
# After using the pretrained models directly, I wanted to try genuinely fine-tuning a deep learning model.
#
# I then found an article on `medium` presenting such an adaptation:
# source : https://scottmduda.medium.com/fine-tuning-language-models-for-sentiment-analysis-91db72396549
#
# github : https://github.com/dontmindifiduda/financial_statement_sentiment_analysis/
# The approach is to fetch a pretrained model from HuggingFace. Three variants are tested here:
#
# - `BERT` : the reference bidirectional-encoder model originally released by Google
# - `DistilBERT` : the distilled, lighter version of `BERT` with broadly comparable performance
# - `RoBERTa` : Facebook's variant of `BERT`, which drops the next-sentence-prediction objective and was trained on more data with longer sequences
# (An illustrative snippet loading these three checkpoints follows the imports below.)
#
# In[1]:
import numpy as np
import pandas as pd
import os
import re
import time
import datetime
import string
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from collections import Counter, defaultdict
import transformers
from transformers import BertModel, BertTokenizer, DistilBertTokenizer, RobertaModel, RobertaTokenizer
from transformers import AutoConfig, AutoModel, AdamW, get_linear_schedule_with_warmup
import torch
from torch import nn, optim
from torch.utils.data import Dataset, random_split, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report
import mlflow
import gc
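# --- Editor's note: illustrative snippet, not part of the original notebook ---
# The three pretrained backbones described above can be pulled from the
# HuggingFace hub as follows; the checkpoint names are the standard public
# ones and are an assumption, not taken from the report.
from transformers import AutoTokenizer, AutoModel

checkpoints = {
    "BERT": "bert-base-uncased",
    "DistilBERT": "distilbert-base-uncased",
    "RoBERTa": "roberta-base",
}

def load_backbone(name):
    ckpt = checkpoints[name]
    tokenizer = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModel.from_pretrained(ckpt)
    return tokenizer, model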
# ## Loading the data
# In[2]:
# Load the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val= | pd.read_parquet('/mnt/data/interim/X_val.gzip') | pandas.read_parquet |
import pandas as pd
import numpy as np
def load_process_covid(path):
#load data
rawDF = (
| pd.read_csv(path, encoding='latin') | pandas.read_csv |
# coding: utf-8
"""Mapping of production and consumption mixes in Europe and their effect on
the carbon footprint of electric vehicles
This code performs the following:
- Import data from ENTSO-E (production quantities, trades relationships)
- Calculates the production and consumption electricity mixes for European countries
- Calculates the carbon footprint (CF) for the above electricity mixes](#CF_el)
- Calculates the production, use-phase and end-of-life emissions for battery electric vehicles (BEVs) under
the following assumptions:](#BEV_calcs)
- Production in Korea (with electricity intensity 684 g CO2-eq/kWh)
- Use phase uses country-specific production and consumption mix
- End-of-life emissions static for all countries
Requires the following files for input:
- ENTSO_production_volumes.csv (from hybridized_impact_factors.py)
- final_emission_factors.csv (from hybridized_impact_factors.py)
- trades.csv (from hybridized_impact_factors.py)
- trade_ef_hv.csv (from hybridized_impact_factors.py)
- API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv (transmission losses, from OECD)
- car_specifications.xlsx
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import logging
#%% Main function
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
"""Run all electricity mix and vehicle calculations and exports results."""
# Korean el-mix 679 g CO2/kWh, from ecoinvent
fp = os.path.curdir
production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data) # Leontief electricity calculations
results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp
#%% Load and format data for calculations
def load_prep_el_data(fp, year):
"""Load electricity data and emissions factors."""
fp_output = os.path.join(fp, 'output')
# Output from bentso.py
filepath_production = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_'+ str(year) +'.csv')
filepath_intensities = os.path.join(fp_output, 'final_emission_factors_'+ str(year) +'.csv')
filepath_trades = os.path.join(fp_output, 'entsoe', 'trades_'+ str(year) +'.csv')
filepath_tradeonly_ef = os.path.join(fp_output, 'ecoinvent_ef_hv.csv')
# read in production mixes (annual average)
production = pd.read_csv(filepath_production, index_col=0)
production.rename_axis(index='', inplace=True)
# matrix of total imports/exports of electricity between regions; aka Z matrix
trades = pd.read_csv(filepath_trades, index_col=0)
trades.fillna(0, inplace=True) # replace np.nan with 0 for matrix math, below
# manually remove Cyprus for now
production.drop(index='CY', inplace=True)
trades = trades.drop(columns='CY').drop(index='CY')
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
""" Make into sum of production and production + import - export"""
country_total_prod_disagg = production.sum(axis=1)
country_total_cons_disagg = country_total_prod_disagg + imports - exports
waste = (production['Waste'] / production.sum(axis=1))
waste_min = waste[waste > 0].min()
waste_max = waste.max()
g_raw = production.sum(axis=1) # Vector of total electricity production (regionalized)
""" Read power plant CO2 intensities [tech averages] """
# average technology CO2 intensities (i.e., non-regionalized)
all_C = pd.read_csv(filepath_intensities, index_col=0)
all_C.drop(index='CY', inplace=True)
# use ecoinvent factors for these countries as a proxy to calculate consumption mixes for receiving countries
trade_ef = pd.read_csv(filepath_tradeonly_ef, index_col=[0, 1, 2, 3], header=[0])
trade_ef.index = trade_ef.index.droplevel([0, 1, 3]) # remove DSID, activityName and productName (leaving geography)
trade_ef.index.rename('geo', inplace=True)
trade_ef.columns = ['emission factor']
# Generate regionalized tech generation matrix
C = all_C.T
C.sort_index(axis=1, inplace=True)
C.sort_index(axis=0, inplace=True)
return production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C
#%% el_calcs
def el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data):
fp_data = os.path.join(fp, 'data')
# Make list of full-country resolution
original_countries = list(production.index)
# Make list of aggregated countries (affects Nordic countries + GB (UK+NI))
# read 3-letter ISO codes
countries = list(trades.index)
""" Calculates national production mixes and consumption mixes using Leontief assumption """
# Start electricity calculations (ELFP.m)
# Calculate production and consumption mixes
# Carbon intensity of production mix
CFPI_no_TD = pd.DataFrame(production.multiply(C.T).sum(axis=1) / production.sum(axis=1), columns=['Production mix intensity']) # production mix intensity without losses
CFPI_no_TD.fillna(0, inplace=True)
# List of countries that have trade relationships, but no production data
trade_only = list(set(trades.index) - set(production.loc[production.sum(axis=1) > 0].index))
# Add ecoinvent proxy emission factors for trade-only countries
logging.info('Replacing missing production mix intensities with values from ecoinvent:')
for country in trade_only:
if CFPI_no_TD.loc[country, 'Production mix intensity'] == 0:
logging.info(country)
CFPI_no_TD.loc[country] = trade_ef.loc[country].values
i = country_total_cons_disagg.size # Number of European regions
g = g_raw
g = g.sort_index() # total generation vector (local production for each country)
total_imported = trades.sum(axis=0) # sum rows for total imports
total_exported = trades.sum(axis=1) # sum columns for total exports
y = total_imported + g - total_exported # total final demand (consumption) of electricity
q = g + total_imported # vector of total consumption
q.replace(np.nan, 0, inplace=True)
if flowtrace_el:
# For flow tracing approach: make Leontief production functions (normalize columns of A)
# normalized trade matrix quadrant
Atmx = pd.DataFrame(np.matmul(trades, np.linalg.pinv(np.diag(q))))
# normalized production matrix quadrant
Agen = pd.DataFrame(np.diag(g) * np.linalg.pinv(np.diag(q)), index=countries, columns=countries) # coefficient matrix, generation
# "Trade" Leontief inverse
# Total imports from region i to j per unit demand on j
Ltmx = pd.DataFrame(np.linalg.pinv(np.identity(i) - Atmx), trades.columns, trades.index)
# Production in country i for trade to country j
# Total generation in i (rows) per unit demand j
Lgen = pd.DataFrame(np.matmul(Agen, Ltmx), index=Agen.index, columns=Ltmx.columns)
y_diag = pd.DataFrame(np.diag(y), index=countries, columns=countries)
# total imports for given demand
Xtmx = pd.DataFrame(np.matmul(np.linalg.pinv(np.identity(i) - Atmx), y_diag))
# Total generation to satisfy demand (consumption)
Xgen = np.matmul(np.matmul(Agen, Ltmx), y_diag)
Xgen.sum(axis=0)
Xgen_df = pd.DataFrame(Xgen, index=Agen.index, columns=y_diag.columns)
# ### Check electricity generated matches demand
totgen = Xgen.sum(axis=0)
r_gendem = totgen / y # All countries should be 1
#%% Generation techonlogy matrix
# TC is a country-by-generation technology matrix - normalized to share of total domestic generation, i.e., normalized generation/production mix
# technology generation, kWh/ kWh domestic generated electricity
TC = pd.DataFrame(np.matmul(np.linalg.pinv(np.diag(g)), production), index=g.index, columns=production.columns)
TCsum = TC.sum(axis=1) # Quality assurance - each country should sum to 1
# Calculate technology generation mix in GWh based on production in each region
TGP = pd.DataFrame(np.matmul(TC.transpose(), np.diag(g)), index=TC.columns, columns=g.index) #.== production
# Carbon intensity of consumption mix
CFCI_no_TD = pd.DataFrame(np.matmul(CFPI_no_TD.T.values, Lgen), columns=CFPI_no_TD.index).T
else:
# Use grid-average assumption for trade
prod_emiss = production.multiply(C.T).sum(axis=1)
trade_emiss = (pd.DataFrame(np.diag(CFPI_no_TD.iloc(axis=1)[0]), index=CFPI_no_TD.index, columns=CFPI_no_TD.index)).dot(trades)
CFCI_no_TD = pd.DataFrame((prod_emiss + trade_emiss.sum(axis=0) - trade_emiss.sum(axis=1)) / y)
CFCI_no_TD.columns = ['Consumption mix intensity']
# use ecoinvent for missing countries
if incl_ei:
CFCI_no_TD.update(trade_ef.rename(columns={'emission factor':'Consumption mix intensity'}))
#%% Calculate losses
# Transpose added after removing country aggregation as data pre-treatment
if include_TD_losses:
# Calculate technology characterization factors including transmission and distribution losses
# First, read transmission and distribution losses, downloaded from World Bank economic indicators (most recent values from 2014)
if isinstance(include_TD_losses, float):
TD_losses = include_TD_losses # apply constant transmission and distribution losses to all countries
elif isinstance(include_TD_losses, bool):
losses_fp = os.path.join(fp_data, 'API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv')
try:
TD_losses = pd.read_csv(losses_fp, skiprows=[0,1,2,3], usecols=[1, 58], index_col=0)
TD_losses = TD_losses.iloc[:, -7:].dropna(how='all', axis=1)
TD_losses = TD_losses.apply(lambda x: x / 100 + 1) # convert losses to a multiplicative factor
# ## Calculate total national carbon emissions from el - production and consumption mixes
TD_losses.index = coco.convert(names=TD_losses.index.tolist(), to='ISO2', not_found=None)
TD_losses = TD_losses.loc[countries]
TD_losses = pd.Series(TD_losses.iloc[:, 0])
except:
print("Warning! Transmission and distribution losses input files not found!")
TD_losses = pd.Series(np.zeros(len(production.index)), index=production.index)
else:
print('invalid entry for losses')
# Caclulate carbon intensity of production and consumption mixes including losses
CFPI_TD_losses = CFPI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0) # apply transmission and distribution losses to production mix intensity
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0)
if len(CFCI_TD_losses) < len(CFPI_TD_losses):
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0)
CFPI = CFPI_TD_losses
CFCI = CFCI_TD_losses
else:
CFPI = CFPI_no_TD
CFCI = CFCI_no_TD
elmixes = (CFPI.copy()).join(CFCI.copy()).T
#%%
# Aggregate multi-nodes to single countries using weighted average of production/consumption as appropriate
country_total_prod_disagg.columns = ["Total production (TWh)"]
country_total_prod_disagg.index = original_countries
country_total_cons_disagg.columns = ["Total consumption (TWh)"]
country_total_cons_disagg.index = original_countries
country_el = pd.concat([country_total_prod_disagg, country_total_cons_disagg], axis=1)
country_el.columns = ['Total production (TWh)', 'Total consumption (TWh)']
CFEL_mixes = elmixes.T
CFEL = pd.concat([country_el, CFEL_mixes], axis=1)
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
CFEL['Trade percentage, gross'] = (imports + exports) / CFEL['Total production (TWh)']
CFEL['Import percentage'] = imports / CFEL['Total production (TWh)']
CFEL['Export percentage'] = exports / CFEL['Total production (TWh)']
CFEL['imports'] = imports
CFEL['exports'] = exports
#Calculate total carbon footprint intensity ratio production vs consumption
rCP = CFCI['Consumption mix intensity'].divide(CFPI['Production mix intensity'])
    rCP.name = "ratio consumption:production mix"
# Export intermediate variables from calculations for troubleshooting
if export_data:
keeper = run_id + "{:%d-%m-%y, %H_%M}".format(datetime.now())
fp_results = os.path.join(fp, 'results')
codecheck_file = os.path.join(os.path.abspath(fp_results), 'code_check_' + keeper + '.xlsx')
writer = pd.ExcelWriter(codecheck_file)
g.to_excel(writer, "g")
q.to_excel(writer, "q")
y.to_excel(writer, 'y')
if flowtrace_el:
Atmx.to_excel(writer, "Atmx")
Agen.to_excel(writer, "Agen")
Ltmx.to_excel(writer, "LTmx")
Lgen.to_excel(writer, "Lgen")
Xtmx.to_excel(writer, "Xtmx")
TGP.to_excel(writer, "TGP")
CFPI.T.to_excel(writer, "CFPI")
CFCI.T.to_excel(writer, "CFCI")
rCP.to_excel(writer, "rCP")
C.T.to_excel(writer, "C")
writer.save()
return codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI
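# --- Editor's note: toy example, not part of the original script ---
# Two-country illustration of the flow-tracing block above. Each column of
# Lgen gives total generation per unit of consumption in the column country,
# so CFPI.T @ Lgen yields the consumption-mix intensity. Numbers are made up;
# numpy/pandas come from the module imports at the top of this file.
def _toy_flowtrace_example():
    idx = ['A', 'B']
    g_toy = pd.Series([100.0, 50.0], index=idx)                  # generation, TWh
    trades_toy = pd.DataFrame([[0.0, 10.0], [5.0, 0.0]],         # exports: row -> column
                              index=idx, columns=idx)
    cfpi_toy = np.array([300.0, 50.0])                           # production mix, g CO2/kWh
    q_toy = g_toy + trades_toy.sum(axis=0)                       # generation + imports
    Atmx_toy = trades_toy.values @ np.linalg.pinv(np.diag(q_toy.values))
    Agen_toy = np.diag(g_toy.values) @ np.linalg.pinv(np.diag(q_toy.values))
    Lgen_toy = Agen_toy @ np.linalg.pinv(np.eye(2) - Atmx_toy)
    return pd.Series(cfpi_toy @ Lgen_toy, index=idx)             # approx. [290, 90] g CO2/kWh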
#%%
def BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation=True, energy_sens=False):
"""Calculate BEV lifecycle emissions."""
# First, setup calculations
# read in data
fp_data = os.path.join(fp, 'data')
vehicle_fp = os.path.join(fp_data, 'car_specifications.xlsx')
cars = pd.read_excel(vehicle_fp, sheet_name='veh_emiss', index_col=[0, 1, 2], usecols='A:G')
cars = cars.sort_index()
vehicle_CO2 = ["BEV", "ICEV"]
if energy_sens:
# if performing the experiment for battery energy demand in manufacturing,
# update with new energy values
alt_energy = pd.read_excel(vehicle_fp, sheet_name='alt_energy', index_col=[0,1,2], usecols='A:H') # column A is scenario name
if isinstance(energy_sens, str):
cars.update(alt_energy.loc[energy_sens])
# Impacts from electricity demand in cell production
battery_prod_el = production_el_intensity / 1e6 * cars.loc["BEV", "Production el, battery"] # in t CO2/vehicle
batt_prod_impacts = cars.loc["BEV", "Production, RObattery"].add(battery_prod_el, fill_value=0).sum(axis=0)
if allocation:
alloc_share = BEV_lifetime / ((cars.loc["BEV", "Max EFC", "cycles"] * (cars.loc["BEV", "Batt size", "kWh"]*.9) * 1000) / cars.loc["BEV", "Use phase", "Wh/km"])
else:
alloc_share = 1
alloc_batt_prod_impacts = alloc_share * batt_prod_impacts
# Total vehicle production impacts - sum of battery emissions + rest of vehicle
BEV_prod_impacts = cars.loc["BEV", "Production, ROV"] + alloc_batt_prod_impacts
# Modify for battery production in Europe
# batt_prod_EU = pd.DataFrame(np.matmul(CFCI.values / 1e6, cars.loc["BEV", "Production el, battery"].values), index=CFCI.index, columns=cars.columns)
batt_prod_EU = pd.DataFrame(np.matmul((elmixes.T['Consumption mix intensity'].values / 1e6).reshape(-1, 1),
cars.loc["BEV", "Production el, battery"].values),
index=elmixes.columns, columns=cars.columns)
# Total battery production impacts in Europe
batt_prod_EU = batt_prod_EU + cars.loc["BEV", "Production, RObattery", "t CO2"]
alloc_batt_prod_EU = alloc_share * batt_prod_EU
BEV_prod_EU = pd.DataFrame(index=elmixes.columns, columns=["A", "C", "JC", "JE"])
BEV_prod_EU = alloc_batt_prod_EU + cars.loc["BEV", "Production, ROV", "t CO2"]
BEV_prod_EU.columns = pd.MultiIndex.from_product([["EUR production impacts BEV"], BEV_prod_EU.columns, ["Consumption mix"]], names=["", "Segment", "Elmix"])
# Calculate use phase emissions
segs = cars.loc['BEV', 'Use phase', 'Wh/km']
mi = pd.MultiIndex.from_product([list(elmixes.index), list(segs.index)])
segs = segs.reindex(mi, level=1)
segs = | pd.DataFrame(segs) | pandas.DataFrame |
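# --- Editor's note: toy calculation, not part of the original script ---
# Use-phase emissions per vehicle follow directly from the consumption-mix
# intensity and the segment energy use; all numbers below are made up.
el_intensity_toy = 300.0      # g CO2-eq/kWh, consumption mix
energy_use_toy = 170.0        # Wh/km for a mid-size segment
lifetime_km_toy = 180000
use_phase_t_co2 = el_intensity_toy * (energy_use_toy / 1000.0) * lifetime_km_toy / 1e6
print(round(use_phase_t_co2, 1))  # ~9.2 t CO2-eq over the vehicle lifetime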
from models.neuralnet import SurvivalNeuralNet
# from models.feedforwardnet import SurvivalFeedForwardNet
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.regularizers import L1L2
import numpy as np
import pandas as pd
import _pickle as cPickle
from keras.utils import to_categorical
from wx_hyperparam import WxHyperParameter
from wx_core import DoFeatureSelectionWX
from sklearn.utils import shuffle
from lifelines import CoxPHFitter, KaplanMeierFitter
from sklearn.metrics import roc_auc_score
# from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
import os
import models.utils as helper
class SurvivalFFNNCOX(SurvivalNeuralNet):
def __init__(self, model_name, cancer, omics_type, out_folder, epochs=1000, vecdim=10):
super(SurvivalFFNNCOX, self).__init__(model_name, cancer, omics_type, out_folder, epochs)
self.vecdim = vecdim
self.selected_idx = None
self.random_seed = 1
self.cancer_type = cancer
self.omics_type = omics_type
self.out_folder = out_folder
def DoFeatureSelectionCPH(self, x, c, s, xnames, fold, sel_f_num, dev_index):
variance_th = 0.15
xdf = | pd.DataFrame(x,columns=xnames) | pandas.DataFrame |
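# --- Editor's note: hedged sketch, not part of the original class ---
# DoFeatureSelectionCPH above sets variance_th = 0.15 before the body is cut
# off; a common follow-up is a variance filter, e.g. sklearn's
# VarianceThreshold. This is an assumption about the elided code, shown only
# as an illustration.
import numpy as np
from sklearn.feature_selection import VarianceThreshold

X_demo = np.hstack([np.random.rand(100, 10),         # var ~ 0.08, dropped
                    3.0 * np.random.rand(100, 10)])  # var ~ 0.75, kept
keep_mask = VarianceThreshold(threshold=0.15).fit(X_demo).get_support()
X_reduced = X_demo[:, keep_mask]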
"""
from KUtils.eda import data_preparation as dp
"""
import numpy as np
import pandas as pd
import re
from KUtils.eda import chartil
def plotUnique(df, optional_settings={}):
unique_dict = {x: len(df[x].unique()) for x in df.columns}
optional_settings.update({'x_label':'Features'})
optional_settings.update({'y_label':'Unique values'})
optional_settings.update({'chart_title':'Unique values in each Feature/Column'})
if optional_settings.get('sort_by_value')==None:
optional_settings.update({'sort_by_value':False})
chartil.core_barchart_from_series( | pd.Series(unique_dict) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import cv2
import numpy as np
import ubelt as ub
import sklearn.metrics
import scipy.io
import pandas as pd
from os.path import expanduser
from . import algos as ctalgo
def to_mat_format():
import pandas as pd
measure_fpath = 'measurements_haul83.csv'
py_df = pd.DataFrame.from_csv(measure_fpath, index_col=None)
py_df['fishlen'] = py_df['fishlen'] / 10
bbox_pts1 = py_df['box_pts1'].map(lambda p: eval(p.replace(';', ','), np.__dict__))
bbox_pts2 = py_df['box_pts2'].map(lambda p: eval(p.replace(';', ','), np.__dict__))
bbox_pts1 = np.array(bbox_pts1.values.tolist())
bbox_pts2 = np.array(bbox_pts2.values.tolist())
X = bbox_pts1.T[0].T
Y = bbox_pts1.T[1].T
X = pd.DataFrame(X, columns=['LX1', 'LX2', 'LX3', 'LX4'])
Y = pd.DataFrame(Y, columns=['LY1', 'LY2', 'LY3', 'LY4'])
py_df.join(X.join(Y))
X = bbox_pts2.T[0].T
Y = bbox_pts2.T[1].T
X = pd.DataFrame(X, columns=['RX1', 'RX2', 'RX3', 'RX4'])
Y = pd.DataFrame(Y, columns=['RY1', 'RY2', 'RY3', 'RY4'])
py_df = py_df.join(X.join(Y))
py_df = py_df.rename(columns={
'error': 'Err',
'fishlen': 'fishLength',
'range': 'fishRange',
})
py_df.drop(['box_pts1', 'box_pts2'], axis=1, inplace=True)
py_df.to_csv('haul83_py_results.csv')
pass
def compare_results():
print('Comparing results')
import pandas as pd
from tabulate import tabulate
# Read in output of demo script
measure_fpath = 'measurements_haul83.csv'
py_df = pd.DataFrame.from_csv(measure_fpath, index_col=None)
# Convert python length output from mm into cm for consistency
py_df['fishlen'] = py_df['fishlen'] / 10
py_df['current_frame'] = py_df['current_frame'].astype(np.int)
# janky CSV parsing
py_df['box_pts1'] = py_df['box_pts1'].map(lambda p: eval(p.replace(';', ','), np.__dict__))
py_df['box_pts2'] = py_df['box_pts2'].map(lambda p: eval(p.replace(';', ','), np.__dict__))
py_df['obox1'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(np.int)))
for pts in py_df['box_pts1']]
py_df['obox2'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(np.int)))
for pts in py_df['box_pts2']]
py_df.drop(['box_pts1', 'box_pts2'], axis=1, inplace=True)
# Remap to matlab names
py_df = py_df.rename(columns={
'error': 'Err',
'fishlen': 'fishLength',
'range': 'fishRange',
})
# Load matlab results
mat_df = _read_kresimir_results()
FORCE_COMPARABLE_RANGE = True
# FORCE_COMPARABLE_RANGE = False
if FORCE_COMPARABLE_RANGE:
# Be absolutely certain we are in comparable regions (may slightly bias
# results, against python and in favor of matlab)
min_frame = max(mat_df.current_frame.min(), py_df.current_frame.min())
max_frame = min(mat_df.current_frame.max(), py_df.current_frame.max())
print('min_frame = {!r}'.format(min_frame))
print('max_frame = {!r}'.format(max_frame))
mat_df = mat_df[(mat_df.current_frame >= min_frame) &
(mat_df.current_frame <= max_frame)]
py_df = py_df[(py_df.current_frame >= min_frame) &
(py_df.current_frame <= max_frame)]
intersect_frames = np.intersect1d(mat_df.current_frame, py_df.current_frame)
print('intersecting frames = {} / {} (matlab)'.format(
len(intersect_frames), len(set(mat_df.current_frame))))
print('intersecting frames = {} / {} (python)'.format(
len(intersect_frames), len(set(py_df.current_frame))))
# Reuse the hungarian algorithm implementation from ctalgo
min_assign = ctalgo.FishStereoMeasurments.minimum_weight_assignment
correspond = []
for f in intersect_frames:
pidxs = np.where(py_df.current_frame == f)[0]
midxs = np.where(mat_df.current_frame == f)[0]
pdf = py_df.iloc[pidxs]
mdf = mat_df.iloc[midxs]
ppts1 = np.array([o.center for o in pdf['obox1']])
mpts1 = np.array([o.center for o in mdf['obox1']])
ppts2 = np.array([o.center for o in pdf['obox2']])
mpts2 = np.array([o.center for o in mdf['obox2']])
dists1 = sklearn.metrics.pairwise.pairwise_distances(ppts1, mpts1)
dists2 = sklearn.metrics.pairwise.pairwise_distances(ppts2, mpts2)
# arbitrarilly chosen threshold
thresh = 100
for i, j in min_assign(dists1):
d1 = dists1[i, j]
d2 = dists2[i, j]
if d1 < thresh and d2 < thresh and abs(d1 - d2) < thresh / 4:
correspond.append((pidxs[i], midxs[j]))
correspond = np.array(correspond)
# pflags = np.array(ub.boolmask(correspond.T[0], len(py_df)))
mflags = np.array(ub.boolmask(correspond.T[1], len(mat_df)))
# print('there are {} detections that seem to be in common'.format(len(correspond)))
# print('The QC flags of the common detections are: {}'.format(
# ub.dict_hist(mat_df[mflags]['QC'].values)))
# print('The QC flags of the other matlab detections are: {}'.format(
# ub.dict_hist(mat_df[~mflags]['QC'].values)))
print('\n\n----\n## All stats\n')
print(ub.codeblock(
'''
Overall, the matlab script made {nmat} length measurements and the
python script made {npy} length measurements. Here is a table
summarizing the average lengths / ranges / errors of each script:
''').format(npy=len(py_df), nmat=len(mat_df)))
stats = pd.DataFrame(columns=['python', 'matlab'])
for key in ['fishLength', 'fishRange', 'Err']:
stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(py_df[key].mean(), py_df[key].std())
stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(mat_df[key].mean(), mat_df[key].std())
stats.loc['nTotal', 'python'] = '{}'.format(len(py_df))
stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df))
print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))
print('\n\n----\n## Only COMMON detections\n')
py_df_c = py_df.iloc[correspond.T[0]]
mat_df_c = mat_df.iloc[correspond.T[1]]
stats = pd.DataFrame(columns=['python', 'matlab'])
for key in ['fishLength', 'fishRange', 'Err']:
stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(py_df_c[key].mean(), py_df_c[key].std())
stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(mat_df_c[key].mean(), mat_df_c[key].std())
stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))
print(ub.codeblock(
'''
Now, we investigate how many dections matlab and python made in common.
(Note, choosing which dections in one version correspond to which in
another is done using a heuristic based on distances between bbox
centers and a thresholded minimum assignment problem).
Python made {npy_c}/{nmat} = {percent:.2f}% of the detections matlab made
''').format(npy_c=len(py_df_c), nmat=len(mat_df),
percent=100 * len(py_df_c) / len(mat_df)))
print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))
print('\n\n----\n## Evaulation using the QC code\n')
hist_hit = ub.dict_hist(mat_df[mflags]['QC'].values)
hist_miss = ub.dict_hist(mat_df[~mflags]['QC'].values)
print(ub.codeblock(
'''
        However, not all of those matlab detections were good. Because we have
        detections in correspondence with each other we can assign the python
detections QC codes.
Here is a histogram of the QC codes for these python detections:
{}
(Note: read histogram as <QC-code>: <frequency>)
Here is a histogram of the other matlab detections that python did not
find:
{}
To summarize:
python correctly rejected {:.2f}% of the matlab QC=0 detections
python correctly accepted {:.2f}% of the matlab QC=1 detections
python correctly accepted {:.2f}% of the matlab QC=2 detections
Note, that because python made detections that matlab did not make,
the remaining {} detections may be right or wrong, but there is
no way to tell from this analysis.
Lastly, here are the statistics for the common detections that had a
non-zero QC code.
''').format(
ub.repr2(hist_hit, nl=1),
ub.repr2(hist_miss, nl=1),
100 * hist_miss[0] / (hist_hit[0] + hist_miss[0]),
100 * hist_hit[1] / (hist_hit[1] + hist_miss[1]),
100 * hist_hit[2] / (hist_hit[2] + hist_miss[2]),
len(py_df) - len(py_df_c)
)
)
is_qc = (mat_df_c['QC'] > 0).values
mat_df_c = mat_df_c[is_qc]
py_df_c = py_df_c[is_qc]
stats = | pd.DataFrame(columns=['python', 'matlab']) | pandas.DataFrame |
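# --- Editor's note: illustrative sketch, not part of the original script ---
# The correspondence heuristic above solves a minimum-weight assignment on
# pairwise box-center distances and keeps pairs under a threshold. The same
# idea with scipy's solver on toy points:
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import pairwise_distances

py_centers = np.array([[10.0, 10.0], [200.0, 50.0]])
mat_centers = np.array([[12.0, 11.0], [205.0, 48.0], [400.0, 400.0]])
dists = pairwise_distances(py_centers, mat_centers)
rows, cols = linear_sum_assignment(dists)
thresh = 100
matches = [(i, j) for i, j in zip(rows, cols) if dists[i, j] < thresh]
print(matches)  # [(0, 0), (1, 1)] -- the far-away matlab box stays unmatched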
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Literal,
Union,
cast,
final,
)
from warnings import warn
import numpy as np
from pandas._libs import (
algos,
hashtable as htable,
iNaT,
lib,
)
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DtypeObj,
Scalar,
TakeIndexer,
npt,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
infer_dtype_from_array,
sanitize_to_nanoseconds,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_object,
ensure_platform_int,
is_array_like,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCRangeIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
)
from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
_shared_docs: dict[str, str] = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
np.ndarray
"""
if not isinstance(values, ABCMultiIndex):
# extract_array would raise
values = extract_array(values, extract_numpy=True)
# we check some simple dtypes first
if is_object_dtype(values.dtype):
return ensure_object(np.asarray(values))
elif is_bool_dtype(values.dtype):
if isinstance(values, np.ndarray):
# i.e. actually dtype == np.dtype("bool")
return np.asarray(values).view("uint8")
else:
# i.e. all-bool Categorical, BooleanArray
try:
return np.asarray(values).astype("uint8", copy=False)
except TypeError:
# GH#42107 we have pd.NAs present
return np.asarray(values)
elif is_integer_dtype(values.dtype):
return np.asarray(values)
elif is_float_dtype(values.dtype):
# Note: checking `values.dtype == "float128"` raises on Windows and 32bit
# error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
# has no attribute "itemsize"
if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr]
# we dont (yet) have float128 hashtable support
return ensure_float64(values)
return np.asarray(values)
elif is_complex_dtype(values.dtype):
# Incompatible return value type (got "Tuple[Union[Any, ExtensionArray,
# ndarray[Any, Any]], Union[Any, ExtensionDtype]]", expected
# "Tuple[ndarray[Any, Any], Union[dtype[Any], ExtensionDtype]]")
return values # type: ignore[return-value]
# datetimelike
elif needs_i8_conversion(values.dtype):
if isinstance(values, np.ndarray):
values = sanitize_to_nanoseconds(values)
npvalues = values.view("i8")
npvalues = cast(np.ndarray, npvalues)
return npvalues
elif is_categorical_dtype(values.dtype):
values = cast("Categorical", values)
values = values.codes
return values
# we have failed, return object
values = np.asarray(values, dtype=object)
return ensure_object(values)
def _reconstruct_data(
values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
"""
reverse of _ensure_data
Parameters
----------
values : np.ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
original : AnyArrayLike
Returns
-------
ExtensionArray or np.ndarray
"""
if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
# Catch DatetimeArray/TimedeltaArray
return values
if not isinstance(dtype, np.dtype):
# i.e. ExtensionDtype
cls = dtype.construct_array_type()
if isinstance(values, cls) and values.dtype == dtype:
return values
values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
# we only support object dtypes bool Index
if isinstance(original, ABCIndex):
values = values.astype(object, copy=False)
elif dtype is not None:
if is_datetime64_dtype(dtype):
dtype = np.dtype("datetime64[ns]")
elif is_timedelta64_dtype(dtype):
dtype = np.dtype("timedelta64[ns]")
values = values.astype(dtype, copy=False)
return values
def _ensure_arraylike(values) -> ArrayLike:
"""
ensure that we are arraylike if not already
"""
if not | is_array_like(values) | pandas.core.dtypes.common.is_array_like |
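# --- Editor's note: illustrative usage, not part of the pandas source above ---
# The _ensure_* helpers back public routines such as pd.unique and
# pd.factorize; the coercions described in their docstrings can be observed
# through those entry points:
import pandas as pd

codes, uniques = pd.factorize(['b', 'a', 'b', None])
print(codes)    # [ 0  1  0 -1]  -- -1 marks the missing value
print(uniques)  # ['b' 'a']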
import pandas as pd
from time import sleep
from pandas.core.frame import DataFrame
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Browser options
options = Options()
options.headless = False  # Keep the browser window visible (True would run it headless)
options.add_argument("--disable-extensions")
# options.add_argument('--disable-useAutomationExtension')
driver = webdriver.Firefox(options=options)  # Apply the options to the browser
driver.set_window_size(411, 823)
# Open the browser at the URL
driver.get("https://www.wowhead.com/spells/talents")
# Wait (in seconds) for the page to load
sleep(3)
next = driver.find_element_by_xpath('//*[@id="lv-spells"]/div[3]/div[1]/a[3]')
while (next is not None):
all_talents_names = [
i.text for i in driver.find_elements_by_xpath("//td[2]/div/a")]
url_talents = [href.get_attribute(
"href") for href in driver.find_elements_by_xpath("//td[2]/div/a[@href]")]
classe_name = [
href.get_attribute("href")[
24:] for href in driver.find_elements_by_xpath("//td[3]//div/div/a")]
for a, b, c in zip(all_talents_names, url_talents, classe_name):
df3 = pd.read_json('Data/talents.json')
df2 = pd.DataFrame({
"name_talent": [a],
"id_talent": [b],
"classe_talent_name": [c]
})
result = | pd.concat([df2, df3]) | pandas.concat |
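# --- Editor's note: hedged sketch, not part of the original scraper ---
# Re-reading Data/talents.json and concatenating one row at a time inside the
# loop grows quadratically; an alternative is to collect the page's records
# and build/concatenate a frame once. The toy lists stand in for scraped data.
import pandas as pd

names = ["Fireball", "Frostbolt"]
urls = ["https://www.wowhead.com/spell=1", "https://www.wowhead.com/spell=2"]
classes = ["mage", "mage"]

records = [{"name_talent": a, "id_talent": b, "classe_talent_name": c}
           for a, b, c in zip(names, urls, classes)]
page_df = pd.DataFrame.from_records(records)
# combined = pd.concat([existing_df, page_df], ignore_index=True)  # once per page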
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
train = pd.read_csv('/kaggle/input/forest-cover-type-prediction/train.csv')
test = pd.read_csv('/kaggle/input/forest-cover-type-prediction/test.csv')
test_id = test.Id
def write_sub(pred,postfix):
'''
Writes submission file
pred - predicted values
postfix - description of a file
'''
sub = | pd.DataFrame({'Id':test_id,'Cover_Type':pred}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 2020
@author: <NAME>
Goal - Code to filter out data near the tank edges from all the tracked videos and save the result as a pickled file.
"""
import sys, os
import pathlib
from pprint import pprint
import numpy as np
from scipy import stats
from scipy.spatial import distance
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import trajectorytools as tt
import trajectorytools.plot as ttplot
import trajectorytools.socialcontext as ttsocial
from trajectorytools.constants import dir_of_data
import pickle
import argparse
import pandas as pd
def filter(tr,l):
left_edge = np.nanmin(tr.s[:,:,0])
right_edge = np.nanmax(tr.s[:,:,0])
bottom_edge = np.nanmin(tr.s[:,:,1])
top_edge = np.nanmax(tr.s[:,:,1])
column_names = list(range(1,tr.number_of_individuals+1))
#for i in range(tr.number_of_individuals):
# position_x = pd.DataFrame(tr.s[:,i,0], columns = column_names)
position_x = pd.DataFrame(tr.s[:,:,0], columns = column_names)
position_y = | pd.DataFrame(tr.s[:,:,1], columns = column_names) | pandas.DataFrame |
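# --- Editor's note: hedged sketch, not part of the original script ---
# The rest of filter() is truncated here. Given the tank edges computed
# above, a typical next step is to mask positions that fall within some
# margin of the walls; the margin value and this masking are assumptions.
import numpy as np

def mask_near_edges(x, y, left, right, bottom, top, margin=5.0):
    # x and y are float arrays of positions; points near any wall become NaN
    near = ((x - left) < margin) | ((right - x) < margin) | \
           ((y - bottom) < margin) | ((top - y) < margin)
    return np.where(near, np.nan, x), np.where(near, np.nan, y)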
#!/usr/bin/env python3
# Simulate data from the Beattie model and M10 model using a Gillespie
# algorithm output plots into examples/example_output or
# MARKOVBUILDER_EXAMPLE_OUTPUT if it exists
import logging
import os
import sys
import matplotlib.pyplot as plt
import pandas as pd
from markov_builder import example_models
def main():
# First define functions which output the values of each transition rate
# for a given voltage (as dictionaries)
# Perform the simulations
mc = example_models.construct_four_state_chain()
protocol = ((-80, 100), (20, 200))
SimulateStepProtocol(mc, protocol, name="Beattie")
def SimulateStepProtocol(mc, protocol, name: str = ""):
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(211)
no_trajectories = 100
dist = None
data = [ | pd.DataFrame(columns=("time", *mc.graph.nodes)) | pandas.DataFrame |
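# --- Editor's note: generic illustration, not part of the original example ---
# The script above drives markov_builder's own simulator; the core Gillespie
# step it relies on can be written for a two-state chain as follows (rates
# and seed are made up).
import numpy as np

def gillespie_two_state(k_open=1.0, k_close=2.0, t_max=10.0, seed=0):
    rng = np.random.default_rng(seed)
    t, state = 0.0, 0
    times, states = [0.0], [0]
    while t < t_max:
        rate = k_open if state == 0 else k_close
        t += rng.exponential(1.0 / rate)   # waiting time ~ Exp(rate)
        state = 1 - state                  # only one transition is possible here
        times.append(t)
        states.append(state)
    return np.array(times), np.array(states)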
# coding: utf-8
# # Parameter Calibration
# This notebook describes a mathematical framework for selecting policy parameters - namely the emissions intensity baseline and permit price. Please be aware of the following key assumptions underlying this model:
#
# * Generators bid into the market at their short-run marginal cost (SRMC);
# * the market for electricity is perfectly competitive;
# * the policy maker is able to directly control the emissions intensity baseline and permit price.
#
# Steps taken to conduct the analysis:
# 1. Import packages and declare paths to files
# 2. Load data
# 3. Organise data
# 4. Construct model used to select scheme parameters. The model consists of three blocks of equations:
# * Primal block - contains constraints related to a standard DCOPF model;
# * Dual block - dual constraints associated with dual program of standard DCOPF model;
# * Strong duality constraint block - block of constraints linking primal and dual objectives.
# 7. Run DCOPF model to find business-as-usual emissions and wholesale prices.
# 8. Run model used to select policy parameters, save output
#
# ## Import packages
# In[1]:
import os
import re
import time
import pickle
import random
from math import pi
import numpy as np
import pandas as pd
import datetime as dt
from pyomo.environ import *
import matplotlib.pyplot as plt
# Seed random number generator
np.random.seed(seed=10)
# ## Declare paths to files
# In[2]:
# Identifier used to update paths depending on the number of scenarios investigated
number_of_scenarios = '100_scenarios'
class DirectoryPaths(object):
"Paths to relevant directories"
def __init__(self):
self.data_dir = os.path.join(os.path.curdir, os.path.pardir, os.path.pardir, 'data')
self.scenarios_dir = os.path.join(os.path.curdir, os.path.pardir, '1_create_scenarios')
self.output_dir = os.path.join(os.path.curdir, 'output', number_of_scenarios)
paths = DirectoryPaths()
# ## Model data
# ### Input data
# In[3]:
class RawData(object):
"Collect input data"
def __init__(self):
# Paths to directories
DirectoryPaths.__init__(self)
# Network data
# ------------
# Nodes
self.df_n = pd.read_csv(os.path.join(self.data_dir, 'network_nodes.csv'), index_col='NODE_ID')
# AC edges
self.df_e = pd.read_csv(os.path.join(self.data_dir, 'network_edges.csv'), index_col='LINE_ID')
# HVDC links
self.df_hvdc_links = pd.read_csv(os.path.join(self.data_dir, 'network_hvdc_links.csv'), index_col='HVDC_LINK_ID')
# AC interconnector links
self.df_ac_i_links = pd.read_csv(os.path.join(self.data_dir, 'network_ac_interconnector_links.csv'), index_col='INTERCONNECTOR_ID')
# AC interconnector flow limits
self.df_ac_i_limits = pd.read_csv(os.path.join(self.data_dir, 'network_ac_interconnector_flow_limits.csv'), index_col='INTERCONNECTOR_ID')
# Generators
# ----------
# Generating unit information
self.df_g = pd.read_csv(os.path.join(self.data_dir, 'generators.csv'), index_col='DUID', dtype={'NODE': int})
self.df_g['SRMC_2016-17'] = self.df_g['SRMC_2016-17'].map(lambda x: x + np.random.uniform(0, 2))
# Operating scenarios
# -------------------
with open(os.path.join(paths.scenarios_dir, 'output', '{0}.pickle'.format(number_of_scenarios)), 'rb') as f:
self.df_scenarios = pickle.load(f)
# Create object containing raw model data
raw_data = RawData()
# ### Organise data for model
# In[4]:
class OrganiseData(object):
"Organise data to be used in mathematical program"
def __init__(self):
# Load model data
RawData.__init__(self)
def reindex_nodes(self):
# Original node indices
df_index_map = self.df_n.index.to_frame().rename(columns={'NODE_ID': 'original'}).reset_index().drop('NODE_ID',axis=1)
# New node indices
df_index_map['new'] = df_index_map.apply(lambda x: x.name + 1, axis=1)
# Create dictionary mapping original node indices to new node indices
index_map = df_index_map.set_index('original')['new'].to_dict()
# Network nodes
# -------------
# Construct new index and assign to dataframe
new_index = pd.Index(self.df_n.apply(lambda x: index_map[x.name], axis=1), name=self.df_n.index.name)
self.df_n.index = new_index
# Network edges
# -------------
# Reindex 'from' and 'to' nodes in network edges dataframe
def _reindex_from_and_to_nodes(row, order=False):
"""Re-index 'from' and 'to' nodes. If required, change node order such that 'from' node index < 'to' node index"""
# Original 'from' and 'to' nodes
n_1 = index_map[row['FROM_NODE']]
n_2 = index_map[row['TO_NODE']]
if order:
# If original 'from' node index is less than original 'to' node index keep same order, else reverse order
if n_1 < n_2:
f, t = n_1, n_2
else:
f, t = n_2, n_1
return pd.Series({'FROM_NODE': f, 'TO_NODE': t})
else:
                return pd.Series({'FROM_NODE': n_1, 'TO_NODE': n_2})
import sys
import gzip
import pandas as pd
import numpy as np
def getDP(sample_info):
try:
dp = float(sample_info.split(':')[2])
except:
dp = np.nan
return dp
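# Example: with the common FORMAT layout GT:AD:DP (an assumption about this VCF),
# getDP("0/1:12,7:19") returns 19.0, while a malformed entry falls back to np.nan.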
def main():
    """Compute the mean per-sample read depth (DP) within fixed-size windows of a gzipped VCF."""
    region = 0            # index of the current genomic window
    temp = {}             # per-sample depths collected for the current window
    result = {}           # per-sample {window index: mean depth}
    region_size = {}      # number of variant records seen in each window
    input = sys.argv[1]   # path to the input .vcf.gz
    output = sys.argv[2]  # path for the output file
    step = int(sys.argv[3])  # window size in base pairs
# with open('/u/scratch2/k/k8688933/dp_stat_vcf_only/dp_csv_split2/header', 'r') as h:
# header = h.readline().strip().split(',')
with gzip.open(input, 'rt') as f:
for line in f:
if line.startswith('#CHROM'):
header = line.strip().split('\t')[9:]
for sample in header:
result[sample] = {}
elif not line.startswith('#'):
info_ = line.strip().split('\t')
sample_info_list = info_[9:]
dp = list(map(getDP, sample_info_list))
pos = float(info_[1])
current_region = int(pos / step)
if current_region > region:
for k, v in temp.items():
result[k][region] = np.nanmean(v)
region_size[region] = len(v)
temp = {}
for sample, depth in zip(header, dp):
# if depth >= 100:
# continue
temp[sample] = [depth]
region = current_region
elif current_region == region:
for sample, depth in zip(header, dp):
# if depth >= 100:
# continue
try:
temp[sample].append(depth)
except KeyError:
temp[sample] = [depth]
    res = pd.DataFrame(result)
import numpy as np
import pandas as pd
from requests.exceptions import HTTPError
import xarray as xr
from toffy.mibitracker_utils import MibiRequests
from toffy import qc_comp
from toffy import settings
import ark.utils.io_utils as io_utils
import ark.utils.misc_utils as misc_utils
import ark.utils.test_utils as test_utils
import os
from pathlib import Path
import pytest
import tempfile
parametrize = pytest.mark.parametrize
RUN_POINT_NAMES = ['Point%d' % i for i in range(1, 13)]
RUN_POINT_IDS = list(range(661, 673))
# NOTE: all fovs and all channels will be tested in the example_qc_metric_eval notebook test
FOVS_CHANS_TEST_MIBI = [
(None, ['CCL8', 'CD11b'], None, RUN_POINT_NAMES, RUN_POINT_IDS),
(None, ['CCL8', 'CD11b'], "TIFs", RUN_POINT_NAMES, RUN_POINT_IDS),
(['Point1'], None, None, RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
(['Point1'], None, "TIFs", RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
(['Point1'], ['CCL8', 'CD11b'], None, RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
(['Point1'], ['CCL8', 'CD11b'], "TIFs", RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1])
]
FOVS_CHANS_TEST_QC = [
(None, None, False),
(None, None, True),
(['fov0', 'fov1'], None, False),
(['fov0', 'fov1'], None, True),
(None, ['chan0', 'chan1'], False),
(None, ['chan0', 'chan1'], True),
(['fov0', 'fov1'], ['chan0', 'chan1'], False),
(['fov0', 'fov1'], ['chan0', 'chan1'], True)
]
MIBITRACKER_EMAIL = '<EMAIL>'
MIBITRACKER_PASSWORD = '<PASSWORD>!?'
MIBITRACKER_RUN_NAME = '191008_JG85b'
MIBITRACKER_RUN_LABEL = 'JG85_Run2'
def test_create_mibitracker_request_helper():
# error check: bad email and/or password provided
mr = qc_comp.create_mibitracker_request_helper('bad_email', 'bad_password')
assert mr is None
# test creation works (just test the correct type returned)
mr = qc_comp.create_mibitracker_request_helper(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD)
assert type(mr) == MibiRequests
@pytest.mark.parametrize(
"test_fovs,test_chans,test_sub_folder,actual_points,actual_ids",
FOVS_CHANS_TEST_MIBI
)
def test_download_mibitracker_data(test_fovs, test_chans, test_sub_folder,
actual_points, actual_ids):
with tempfile.TemporaryDirectory() as temp_dir:
# error check: bad base_dir provided
with pytest.raises(FileNotFoundError):
qc_comp.download_mibitracker_data('', '', '', '', 'bad_base_dir', '', '')
# error check: bad run_name and/or run_label provided
with pytest.raises(ValueError):
qc_comp.download_mibitracker_data(
MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD, 'bad_run_name', 'bad_run_label',
temp_dir, '', ''
)
# bad fovs provided
with pytest.raises(ValueError):
qc_comp.download_mibitracker_data(
MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
temp_dir, '', '', fovs=['Point0', 'Point1']
)
# bad channels provided
with pytest.raises(ValueError):
qc_comp.download_mibitracker_data(
MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
temp_dir, '', '', channels=['B', 'C']
)
# ensure test to remove tiff_dir if it already exists runs
os.mkdir(os.path.join(temp_dir, 'sample_tiff_dir'))
# error check: tiff_dir that already exists provided with overwrite_tiff_dir=False
with pytest.raises(ValueError):
qc_comp.download_mibitracker_data(
MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
temp_dir, 'sample_tiff_dir', overwrite_tiff_dir=False,
img_sub_folder=test_sub_folder, fovs=test_fovs, channels=test_chans
)
# run the data
run_order = qc_comp.download_mibitracker_data(
MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
temp_dir, 'sample_tiff_dir', overwrite_tiff_dir=True,
img_sub_folder=test_sub_folder, fovs=test_fovs, channels=test_chans
)
# for testing purposes, set test_fovs and test_chans to all fovs and channels
# if they're set to None
if test_fovs is None:
test_fovs = ['Point%d' % i for i in np.arange(1, 13)]
if test_chans is None:
test_chans = [
'CD115', 'C', 'Au', 'CCL8', 'CD11c', 'Ca', 'Background',
'CD11b', 'CD192', 'CD19', 'CD206', 'CD25', 'CD4', 'CD45.1',
'CD3', 'CD31', 'CD49b', 'CD68', 'CD45.2', 'FceRI', 'DNA', 'CD8',
'F4-80', 'Fe', 'IL-1B', 'Ly-6C', 'FRB', 'Lyve1', 'Ly-6G', 'MHCII',
'Na', 'Si', 'SMA', 'P', 'Ta', 'TREM2'
]
# set the sub folder to a blank string if None
if test_sub_folder is None:
test_sub_folder = ""
# get the contents of tiff_dir
tiff_dir_contents = os.listdir(os.path.join(temp_dir, 'sample_tiff_dir'))
# assert all the fovs are contained in the dir
tiff_dir_fovs = [d for d in tiff_dir_contents if
os.path.isdir(os.path.join(temp_dir, 'sample_tiff_dir', d))]
misc_utils.verify_same_elements(
created_fov_dirs=tiff_dir_fovs,
provided_fov_dirs=test_fovs
)
# assert for each fov the channels created are correct
for fov in tiff_dir_fovs:
# list all the files in the fov folder (and sub folder)
# remove file extensions so raw channel names are extracted
channel_files = io_utils.remove_file_extensions(os.listdir(
os.path.join(temp_dir, 'sample_tiff_dir', fov, test_sub_folder)
))
# assert the channel names are the same
misc_utils.verify_same_elements(
create_channels=channel_files,
provided_channels=test_chans
)
# assert that the run order created is correct for both points and ids
run_fov_names = [ro[0] for ro in run_order]
run_fov_ids = [ro[1] for ro in run_order]
assert run_fov_names == actual_points
assert run_fov_ids == actual_ids
def test_compute_nonzero_mean_intensity():
# test on a zero array
sample_img_arr = np.zeros((3, 3))
sample_nonzero_mean = qc_comp.compute_nonzero_mean_intensity(sample_img_arr)
assert sample_nonzero_mean == 0
# test on a non-zero array
sample_img_arr = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
sample_nonzero_mean = qc_comp.compute_nonzero_mean_intensity(sample_img_arr)
assert sample_nonzero_mean == 3
def test_compute_total_intensity():
sample_img_arr = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
sample_total_intensity = qc_comp.compute_total_intensity(sample_img_arr)
assert sample_total_intensity == 15
def test_compute_99_9_intensity():
sample_img_arr = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
sample_99_9_intensity = qc_comp.compute_99_9_intensity(sample_img_arr)
assert np.allclose(sample_99_9_intensity, 5, rtol=1e-02)
def test_sort_bin_file_fovs():
# test without suffix ignore
fov_list = [
'fov-2-scan-2', 'fov-10-scan-1', 'fov-5-scan-3', 'fov-2-scan-10', 'fov-200-scan-4'
]
fov_list_sorted = qc_comp.sort_bin_file_fovs(fov_list)
assert fov_list_sorted == [
'fov-2-scan-2', 'fov-2-scan-10', 'fov-5-scan-3', 'fov-10-scan-1', 'fov-200-scan-4'
]
# test with a suffix on some fovs
fov_list_some_suffix = fov_list[:]
fov_list_some_suffix[:2] = [f + '_suffix.csv' for f in fov_list[:2]]
fov_list_sorted = qc_comp.sort_bin_file_fovs(fov_list_some_suffix, suffix_ignore='_suffix.csv')
assert fov_list_sorted == [
'fov-2-scan-2_suffix.csv', 'fov-2-scan-10',
'fov-5-scan-3', 'fov-10-scan-1_suffix.csv', 'fov-200-scan-4'
]
# test with a suffix on all fovs
fov_list_all_suffix = [f + '_suffix.csv' for f in fov_list]
fov_list_sorted = qc_comp.sort_bin_file_fovs(fov_list_all_suffix, suffix_ignore='_suffix.csv')
assert fov_list_sorted == [
'fov-2-scan-2_suffix.csv', 'fov-2-scan-10_suffix.csv',
'fov-5-scan-3_suffix.csv', 'fov-10-scan-1_suffix.csv',
'fov-200-scan-4_suffix.csv'
]
# NOTE: we don't need to test iteration over multiple FOVs because
# test_compute_qc_metrics computes on 1 FOV at a time
@parametrize("gaussian_blur", [False, True])
@parametrize("bin_file_folder, fovs",
[('moly', ['fov-1-scan-1']), ('tissue', ['fov-1-scan-1'])])
def test_compute_qc_metrics(gaussian_blur, bin_file_folder, fovs):
with tempfile.TemporaryDirectory() as temp_dir:
# define a sample panel, leave panel correctness/incorrectness test for mibi_bin_tools
panel = pd.DataFrame([{
'Mass': 89,
'Target': 'SMA',
'Start': 88.7,
'Stop': 89.0,
}])
# write the panel to csv
panel_path = os.path.join(temp_dir, 'sample_panel.csv')
panel.to_csv(panel_path, index=False)
# define the full path to the bin file folder
bin_file_path = os.path.join(Path(__file__).parent, 'data', bin_file_folder)
# define a sample qc_path to write to
qc_path = os.path.join(temp_dir, 'sample_qc_dir')
# bin folder error check
with pytest.raises(FileNotFoundError):
qc_comp.compute_qc_metrics(
'bad_bin_path', fovs[0], panel_path, gaussian_blur
)
# panel file error check
with pytest.raises(FileNotFoundError):
qc_comp.compute_qc_metrics(
bin_file_path, fovs[0], 'bad_panel_path', gaussian_blur
)
# fov error check
with pytest.raises(FileNotFoundError):
qc_comp.compute_qc_metrics(
bin_file_path, 'bad_fov', panel_path, gaussian_blur
)
# first time: create new files, also asserts qc_path is created
qc_comp.compute_qc_metrics(
bin_file_path, fovs[0], panel_path, gaussian_blur
)
for ms, mc in zip(settings.QC_SUFFIXES, settings.QC_COLUMNS):
# assert the file for this QC metric was created
metric_path = os.path.join(bin_file_path, '%s_%s.csv' % (fovs[0], ms))
assert os.path.exists(metric_path)
# read the data for this QC metric
            metric_data = pd.read_csv(metric_path)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import time
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid', palette='deep')
#Analysing dataset with padas profiling
#from pandas_profiling import ProfileReport
#profile = ProfileReport(df, title='Medical Cost Personal Datasets', html={'style':{'full_width':True}})
#Importing Dataset
df_raw = pd.read_excel('titanic3.xls')
new_columns = ['class','survival', 'name', 'sex', 'age', 'siblings/spouses',
'parents/children', 'ticket', 'fare', 'cabin', 'embarked', 'lifeboat',
'body number', 'home/destination']
df_raw.info()
#Feature Engineering
df = pd.DataFrame(df_raw.values, columns= new_columns )
df_user = pd.DataFrame(np.arange(0, len(df)), columns=['passenger'])
df = pd.concat([df_user, df], axis=1)
df['family'] = df['siblings/spouses'] + df['parents/children'] + 1
df = df.drop(['siblings/spouses','parents/children'], axis=1)
df['embarked'].value_counts()
df['embarked'].replace(['S', 'C', 'Q'],
                       ['southampton', 'cherbourg', 'queenstown'], inplace=True)
df.info()
df.columns
df[['class', 'survival', 'age', 'fare',
'body number', 'family']] = df[['class', 'survival', 'age', 'fare',
'body number', 'family']].apply(pd.to_numeric)
#Converting columns to Datetime
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
time_new = df['Timestamp'].iloc[0]
df['Hour'] = df['Timestamp'].apply(lambda time_new: time_new.hour)
df['Month'] = df['Timestamp'].apply(lambda time_new: time_new.month)
df['Day'] = df['Timestamp'].apply(lambda time_new: time_new.dayofweek)
df["hour"] = df.hour.str.slice(1, 3).astype(int)
#Visualising Dataset
bins = range(0,100,10)
ax = sns.distplot(df.age[df.y=='yes'],
color='red', kde=False, bins=bins, label='Have Subscribed')
sns.distplot(df.age[df.y=='no'],
ax=ax, # Overplots on first plot
color='blue', kde=False, bins=bins, label="Haven't Subscribed")
plt.legend()
plt.show()
g = pd.crosstab(df.sex, df.survival).plot(kind='bar', figsize=(10,5))
ax = g.axes
for p in ax.patches:
ax.annotate(f"{p.get_height() * 100 / df.shape[0]:.2f}%", (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='gray', rotation=0, xytext=(0, 10),
textcoords='offset points')
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency for Genre')
plt.legend(['Not Survived', 'Survived'])
plt.xlabel('Genre')
plt.ylabel('Quantity')
plt.show()
df.groupby(pd.cut(df.age, bins))['age'].count().plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Frequency of Age')
plt.grid(b=True, which='major', linestyle='--')
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
pd.crosstab(pd.cut(df.age, bins), df.survival).plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency for Age')
plt.legend(['Not Survived', 'Survived'])
plt.yticks(np.arange(0,250,50))
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
not_survived = df[df.survival == 0]
survived = df[df.survival == 1]
age_notsurvival = (not_survived.groupby(pd.cut(not_survived.age, bins))['age'].count() / len(not_survived)) * 100
age_survival = (survived.groupby(pd.cut(survived.age, bins))['age'].count() / len(survived)) * 100
age_notsurvival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Percentage of Age for Passengers Who Did Not Survive')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()
age_survival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Percentage of Age for Passengers Who Survived')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True, figsize=(10,10))
plt.subplots_adjust(hspace=0)
plt.suptitle('Age Frequency')
ax1 = sns.countplot(pd.cut(df.age, bins)
# Copyright (C) 2021 <NAME>
import numpy as np
import pandas as pd
import scipy.spatial.distance as dist
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses, models
from torch.utils.data import DataLoader
from sentence_transformers.readers import TripletReader
from sentence_transformers.evaluation import TripletEvaluator
from tqdm import trange
from .util import *
from .encoder import SBertEncoder
class SlangGenTrainer:
MAX_NEIGHBOR = 300
def __init__(self, dataset, word_encoder, out_dir='', verbose=False):
self.out_dir = out_dir
create_directory(out_dir)
self.dataset = dataset
self.word_encoder = word_encoder
self.verbose = verbose
conv_lens = []
for i in range(dataset.V):
word = dataset.vocab[i]
conv_lens.append(len(dataset.conv_data[word].definitions))
self.conv_lens = np.asarray(conv_lens)
self.conv_acc = np.zeros(dataset.V, dtype=np.int32)
for i in range(1,dataset.V):
self.conv_acc[i] = self.conv_acc[i-1] + self.conv_lens[i-1]
self.word_dist = self.preprocess_word_dist()
np.save(out_dir+'/word_dist.npy', self.word_dist)
self.sense_encoder = None
self.se_model_name = "INVALID"
def preprocess_slang_data(self, slang_ind, fold_name='default', skip_steps=[]):
out_dir = self.out_dir + '/' + fold_name
create_directory(out_dir)
out_dir += '/'
# Generate contrastive pairs for training
if 'contrastive' not in skip_steps:
if self.verbose:
print("Generating contrative pairs...")
contrastive_pairs_train, contrastive_pairs_dev = self.preprocess_contrastive(slang_ind)
np.save(out_dir+'contrastive_train.npy', contrastive_pairs_train)
np.save(out_dir+'contrastive_dev.npy', contrastive_pairs_dev)
if self.verbose:
print("Complete!")
def load_preprocessed_data(self, fold_name='default', skip_steps=[]):
out_dir = self.out_dir + '/' + fold_name + '/'
preproc_data = {}
if 'contrastive' not in skip_steps:
preproc_data['cp_train'] = np.load(out_dir+'contrastive_train.npy', allow_pickle=True)
preproc_data['cp_dev'] = np.load(out_dir+'contrastive_dev.npy', allow_pickle=True)
return preproc_data
def load_sense_encoder(self, model_name, model_path):
if self.se_model_name == model_name:
return self.sense_encoder
self.sense_encoder = SBertEncoder(sbert_model_name=model_name, name=model_path)
self.se_model_name = model_name
def get_trained_embeddings(self, slang_ind, fold_name='default', model_path='SBERT_contrastive'):
model_name = self.out_dir + '/' + fold_name + '/SBERT_data/' + model_path
self.load_sense_encoder(model_name, model_path)
return self.get_sense_embeddings(slang_ind, fold_name)
def get_sense_embeddings(self, slang_ind, fold_name='default'):
if self.verbose:
print("Encoding sense definitions...")
out_dir = self.out_dir + '/' + fold_name + '/'
sense_embeds = self.sense_encoder.encode_dataset(self.dataset, slang_ind)
np.savez(out_dir+"sum_embed_"+self.sense_encoder.name+".npz", train=sense_embeds['train'], dev=sense_embeds['dev'], test=sense_embeds['test'], standard=sense_embeds['standard'])
if self.verbose:
print("Complete!")
return sense_embeds
def get_testtime_embeddings(self, slang_def_sents, fold_name='default', model_path='SBERT_contrastive'):
model_name = self.out_dir + '/' + fold_name + '/SBERT_data/' + model_path
self.load_sense_encoder(model_name, model_path)
return self.sense_encoder.encode_sentences(slang_def_sents)
def train_contrastive_model(self, slang_ind, params=None, fold_name='default'):
if params is None:
params = {'train_batch_size':16, 'num_epochs':4, 'triplet_margin':1, 'outpath':'SBERT_contrastive'}
self.prep_contrastive_training(slang_ind, fold_name=fold_name)
out_dir = self.out_dir + '/' + fold_name + '/SBERT_data/'
triplet_reader = TripletReader(out_dir, s1_col_idx=0, s2_col_idx=1, s3_col_idx=2, delimiter=',', has_header=True)
output_path = out_dir+params['outpath']
sbert_model = SentenceTransformer('bert-base-nli-mean-tokens')
train_data = SentencesDataset(examples=triplet_reader.get_examples('contrastive_train.csv'), model=sbert_model)
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=params['train_batch_size'])
train_loss = losses.TripletLoss(model=sbert_model, triplet_margin=params['triplet_margin'])
dev_data = SentencesDataset(examples=triplet_reader.get_examples('contrastive_dev.csv'), model=sbert_model)
dev_dataloader = DataLoader(dev_data, shuffle=False, batch_size=params['train_batch_size'])
evaluator = TripletEvaluator(dev_dataloader)
warmup_steps = int(len(train_data)*params['num_epochs']/params['train_batch_size']*0.1) #10% of train data
# Train the model
sbert_model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=params['num_epochs'],
evaluation_steps=len(dev_data),
warmup_steps=warmup_steps,
output_path=output_path)
def prep_contrastive_training(self, slang_ind, fold_name='default'):
if self.verbose:
print("Generating triplet data for contrastive training...")
out_dir = self.out_dir + '/' + fold_name + '/SBERT_data/'
create_directory(out_dir)
preproc_data = self.load_preprocessed_data(fold_name=fold_name)
N_train, triplets = self.sample_triplets(preproc_data['cp_train'])
N_dev, triplets_dev = self.sample_triplets(preproc_data['cp_dev'])
np.save(out_dir+'triplets.npy', triplets)
np.save(out_dir+'triplets_dev.npy', triplets_dev)
slang_def_sents = []
for i in range(self.dataset.N_total):
slang_def_sents.append(' '.join(simple_preprocess(self.dataset.slang_data[i].def_sent)))
conv_def_sents = []
for i in range(self.dataset.V):
word = self.dataset.vocab[i]
for d in self.dataset.conv_data[word].definitions:
conv_def_sents.append(' '.join(simple_preprocess(d['def'])))
data_train = {'anchor':[slang_def_sents[slang_ind.train[triplets[i][0]]] for i in range(N_train)],\
'positive':[conv_def_sents[triplets[i][1]] for i in range(N_train)],\
'negative':[conv_def_sents[triplets[i][2]] for i in range(N_train)]}
data_dev = {'anchor':[slang_def_sents[slang_ind.dev[triplets_dev[i][0]]] for i in range(N_dev)],\
'positive':[conv_def_sents[triplets_dev[i][1]] for i in range(N_dev)],\
'negative':[conv_def_sents[triplets_dev[i][2]] for i in range(N_dev)]}
df_train = pd.DataFrame(data=data_train)
        df_dev = pd.DataFrame(data=data_dev)
#!/usr/bin/env python
# coding: utf-8
# # Creating machine learning models
# ## High level ML project managment
# The need and the steps taken when creating a machine learning model usually falls into the following steps:
#
# 
#
# * The business first needs to define the problem and the potential value that a solution will bring.
# * The second step is to translate the business problem into a machine learning problem.
# * The third step is to run a lot of experiments: try out many ML algorithms, do feature engineering, debate with your colleagues and present the results.
# * The final step is to decide which model to use and start thinking about deployment.
#
# Historically, deployment has not been the job of an ML practitioner, but this is changing rapidly.
#
# If any problem is too big to overcome in a given step, the team should go a step back and rethink the previous step.
# ## Business problem
# 
# Imagine that we are working in a huge analytics company and our new task is to model the probability of Counter Terrorist (**CT** for short) team winning a Counter Strike: Global Offensive (**CSGO** for short) game.
#
# The rules of the game are simple: there are two teams, named terrorists and counter-terrorists, each consisting of 5 players. At the start of the round each player buys weapons, armor and other equipment and the objective is to win the match.
#
# To read more about the game visit the official website: https://blog.counter-strike.net/index.php/about/
#
# This esport is very popular and our analytics company is trying to break into the gaming market with a very accurate model which will be shown on TV, on gaming streams and other places.
# ## Rules of the game
# The ultimate victory of a CSGO match is when a team, either CT or T, earn **16 points**. A point is earn when a match is won.
#
# Match winning criteria:
#
# * A given team eliminates all 5 players of the opposite team.
# * If the terrorists have planted the bomb, the CT team wins by defusing it, while the T team wins if the bomb explodes.
#
# The maximum number of seconds in a match is **175.00**.
#
# There are 5 CT and 5 T players at match start. Each of them has **100 hit points (HP)** and can buy up to **100 armor** and a helmet.
#
# Players earn in-game dollars during a match, which can be spent on weapons, grenades, armor and other accessories.
# ## Machine learning problem
# After the business problem is defined and the rules of the game are clear, we now need to convert the business problem into a machine learning problem.
# If we define:
#
# $$ \mathbb{Y}_{i} = \{0, 1\}, \forall i = 1, ..., n$$
#
# $$ \mathbb{X}_{i} \in R^{p}, \forall i = 1, ..., n$$
#
# Where
#
# $i$ - observation i.
#
# $n$ - total number of observations.
#
# $p$ - number of features.
#
# Then we are trying to create a model for the probability to observe $\mathbb{Y}=1$ event given $\mathbb{X}$:
#
# $$P(\mathbb{Y}=1|\mathbb{X}) \in (0, 1)$$
#
# $\mathbb{Y} = 1$ means that the CT team have won and the $\mathbb{Y} = 0$ means that CT team have lost.
#
# The function $f$ that links $\mathbb{X}$ to $\mathbb{Y}$ is the machine learning model which are trying to build:
#
# $$ f: \mathbb{X} \rightarrow \mathbb{Y} $$
# Because we are trying to predict an observation falling into one of two classes (CT winning or losing) the machine learning model $f$ can be called a *binary classifier*.
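# As a minimal illustration with made-up numbers (not the CSGO data used later), a logistic regression is exactly such a probability model: given features it outputs $P(\mathbb{Y}=1|\mathbb{X}) \in (0, 1)$.
# In[ ]:
import numpy as np
from sklearn.linear_model import LogisticRegression

X_toy = np.array([[1.0, 0.2], [0.5, 0.9], [0.1, 0.1], [0.9, 0.8]])
y_toy = np.array([1, 1, 0, 0])

toy_clf = LogisticRegression().fit(X_toy, y_toy)
# Each row gets a probability of belonging to class 1 (CT winning, in our analogy)
print(toy_clf.predict_proba(X_toy)[:, 1])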
# # Python package imports
# The first thing that any developer or ML practitioner does is load the packages installed on their machine.
# In[1]:
# Data reading
import pandas as pd
# Main modeling class
import xgboost as xgb
# Data spliting
from sklearn.model_selection import train_test_split
# Plotting library
import matplotlib.pyplot as plt
import seaborn as sns
# Array math
import numpy as np
# Modeling frameworks
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
# Accuracy metrics
from sklearn.metrics import roc_auc_score, roc_curve
# Hp parameter search
from sklearn.model_selection import ParameterGrid
# Model saving
import pickle
# Operating system functionalities
import os
# JSON saving and loading
import json
# # Reading data
# Finding, cleaning and labelling data is usually a long and painful process. This is not the main emphasis of this book, so let's imagine that we have already spent months creating the beautiful dataset which we will read.
#
# The original dataset can be found here: https://www.kaggle.com/christianlillelund/csgo-round-winner-classification
# In[2]:
# Using pandas to read a csv file
d = pd.read_csv("data/data.csv")
# Printing the shape of data
print(f"Number of observations: {d.shape[0]}")
print(f"Number of features: {d.shape[1]}")
# In[3]:
# Getting the feature names
d.columns.values
# In[4]:
# Displaying a snippet of data
print(d.head())
# A short description about the data from the kaggle source:
#
# *The dataset consists of round snapshots from about 700 demos from high level tournament play in 2019 and 2020. Warmup rounds and restarts have been filtered, and for the remaining live rounds a round snapshot has been recorded every 20 seconds until the round is decided. Following its initial publication, It has been pre-processed and flattened to improve readability and make it easier for algorithms to process. The total number of snapshots is 122411. **Snapshots are i.i.d and should be treated as individual data points**, not as part of a match.*
# The feature that will be used for the creation of $\mathbb{Y}$ variable is **round_winner**. If CT have won, then the value of $\mathbb{Y}$ will be 1 and 0 othervise.
# In[5]:
# Creating the Y variable
d['Y'] = [1 if x == 'CT' else 0 for x in d['round_winner']]
# Inspecting the distribution of the classes
distribution = d.groupby('Y', as_index=False).size()
distribution['Y'] = distribution['Y'].astype(str)
distribution['share'] = distribution['size'] / distribution['size'].sum()
plt.bar(
distribution['Y'],
distribution['share'],
edgecolor='black'
)
plt.title("Share of binary responses in data")
plt.ylabel("Share in data")
plt.xlabel("Response value")
plt.show()
# The classes are almost perfectly balanced.
# ## Dropping inconsistencies
# In[6]:
d = d[(d['t_players_alive']<=5) & (d['ct_players_alive']<=5)].copy()
# # Feature engineering
# Feature engineering is the process of using domain knowledge to create additional features from the raw features in data. A lot of experimentation time is spent here, and not all of the created features end up improving the model. Nevertheless, if we create at least one new feature that improves the performance of our classifier, we have added immense value to the original dataset without investing in new data collection.
#
# The AI expert <NAME> has proposed that the current ML industry should move from the model centric approach to the data centric approach {cite}`data_centric`:
#
# *"If 80 percent of our work is data preparation, then ensuring data quality is the important work of a machine learning team."*
#
# <NAME> urges a shift in focus: instead of trying out new models on a fixed dataset, fix a model and then engineer new features, label new data points and run other data-related experiments.
#
# Regardless of which school of thought wins out, developing new features is paramount.
#
# In[7]:
# Boolean for the planting of the bomb event
d['bomb_planted'] = [1 if x else 0 for x in d['bomb_planted']]
# The differences between the team scores
d['team_score_diff'] = d['ct_score'] - d['t_score']
# Putting the team_score_diff into buckets
cut_bins_score = [-15, -5, 0, 5, 15]
d['team_score_diff'] = pd.cut(d['team_score_diff'], bins=cut_bins_score)
# Calculating the share of remaining health of CT
d['ct_health_share'] = d['ct_health'] / (d['t_health'] + d['ct_health'])
# Calculating the armor share
d['ct_armor_per_player'] = d['ct_armor'] / d['ct_players_alive']
# Total money share owned by CT
d['ct_money_share'] = d['ct_money'] / (d['t_money'] + d['ct_money'])
# Difference between alive CT players and T players
d['ct_players_alive_diff'] = d['ct_players_alive'] - d['t_players_alive']
# Is there a defuse kit in CT team?
d['ct_defuse_kit_present'] = [1 if x > 0 else 0 for x in d['ct_defuse_kits']]
# # Explanatory Data Analysis
# ## Bomb planting event
# In[8]:
# Calculating the probability of winning when a bomb is planted
prob_w = d.groupby(['bomb_planted'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adding a custom index
prob_w.index = ['bomb not planted', 'bomb planted']
# Ploting the results
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.show()
# In[9]:
print(prob_w)
# As we can see, if a bomb is planted, the probability of winning for the CT squad drops by more than half compared to when the bomb is not planted: **0.52** versus **0.22** respectively.
# In[ ]:
# ## Maps
# In[10]:
# Calculating the probability of winning for each map
prob_w = d.groupby(['map'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Ploting the results
plt.figure(figsize=(12, 7))
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.axhline(y=0.5, color='r', linestyle='--')
plt.show()
# In[11]:
print(prob_w)
# The map **de_cache** seems to be a clear outlier in the dataset: the CTs win more than 70% of the matches on this map.
# ## Tilting
# The definition of tilting in esports is a ***state of mental or emotional confusion or frustration***. We can measure it by how the current match score in favor of CT influences the probability of winning.
# In[12]:
# Calculating the probability of winning by the difference in team scores
prob_w = d.groupby(['team_score_diff'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adjusting the index
prob_w.index = [str(x) for x in prob_w.index]
# Ploting the results
plt.figure(figsize=(10, 6))
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.xlabel("Difference between scores in favor of CT")
plt.axhline(y=0.5, color='r', linestyle='--')
plt.show()
# There is a relationship between the matches won by CT and the probability of winning the current match: the bigger the difference between the match score in favor of CT, the higher the chances of winning.
# ## Health, armor and money influence
# In[13]:
# Ploting the distributions of CT health share
plt.figure(figsize=(10, 6))
plt.hist(
d.loc[d['Y']==1, 'ct_health_share'].values,
alpha=0.5,
label='CT won match',
edgecolor='black',
bins=20
)
plt.hist(
d.loc[d['Y']==0, 'ct_health_share'].values,
alpha=0.5,
label='CT won match',
edgecolor='black',
bins=20
)
plt.legend()
plt.title("Distribution of CT health share of total HP pool by match win event")
plt.ylabel("Number of matches")
plt.xlabel("Share of total HP pool")
plt.show()
# As our intuition suggested, the larger the share of the total HP pool held by CT, the higher the probability of winning.
# In[14]:
plt.figure(figsize=(10, 6))
sns.kdeplot(
d.loc[d['Y']==1, 'ct_armor_per_player'].values,
shade=True,
linewidth=2,
label = 'CT won match'
)
sns.kdeplot(
d.loc[d['Y']==0, 'ct_armor_per_player'].values,
shade=True,
linewidth=2,
label = 'CT lost match'
)
plt.legend()
plt.title("Distribution of CT armor per player by match win event")
plt.ylabel("Share of matches")
plt.xlabel("Armor per player")
plt.show()
# The density of CT wins shifts towards higher values of armor per player: the more armor per player, the more likely CT is to win the match.
# In[15]:
plt.figure(figsize=(10, 6))
plt.hist(
d.loc[d['Y']==1, 'ct_money_share'].values,
alpha=0.5,
label='CT won match',
edgecolor='black',
bins=20
)
plt.hist(
d.loc[d['Y']==0, 'ct_money_share'].values,
alpha=0.5,
label='CT lost match',
edgecolor='black',
bins=20
)
plt.legend()
plt.title("Distribution of all money owned by CT by match win event")
plt.ylabel("Number of matches")
plt.xlabel("Share of total money owned")
plt.show()
# As with health, controlling a larger share of the game's total economy increases the chances of winning a match.
# ## Impact of alive players
# In[16]:
# Calculating the probability of winning by the number of alive players on each side
prob_w = d.groupby(['ct_players_alive', 't_players_alive'], as_index=False)['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Droping the obvious cases of CT=0 and T=0
prob_w = prob_w[[False if x[0]==0.0 or x[1]==0.0 else True for x in prob_w.index]]
# Creating a dataframe for a heatmap
heatmap_df = pd.DataFrame({
'ct_players_alive': prob_w.index.get_level_values(0),
't_players_alive': prob_w.index.get_level_values(1),
'p': prob_w['prob_of_win_CT']
})
heatmap_df = heatmap_df.pivot(index='ct_players_alive', columns='t_players_alive', values='p')
# Drawing the heatmap
plt.figure(figsize=(8, 8))
sns.heatmap(heatmap_df, linewidths=.5, cmap="YlGnBu")
plt.title("Heatmap of probability to win vs alive players")
plt.show()
# Even a one-player advantage in a CSGO match leads to a huge increase in the probability of winning. The probability of winning is highest when many CT players and few T players are alive.
# ## Defusal kit necessity
# If a bomb is planted in the game, the only way to defuse it is with a defusal kit.
# In[17]:
# Calculating the probability of winning by defuse kit presence
prob_w = d.groupby(['ct_defuse_kit_present'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adding a custom index
prob_w.index = ['Defuse kit not present', 'Defuse kit present']
# Ploting the results
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.show()
# In[18]:
prob_w
# Having a defusal kit in a team really proves to be beneficial!
# # Evaluating model performance
# In order to compare algorithms with one another or to measure the impact of new data and features, we need a performance metric (or more than one). One of the most popular metrics for evaluating binary classifiers is the **Area Under the Curve (AUC)**. To get a grasp of AUC, we first need to cover some intermediate definitions.
# ## Confusion matrix
# In the field of machine learning and specifically the problem of statistical classification, a confusion matrix is a specific table layout that allows visualization of the performance of an algorithm. Each row of the matrix represents the instances in an actual class while each column represents the instances in a predicted class, or vice versa – both variants are found in various textbooks and articles.
#
# 
#
# The abbreviations stand for:
#
# **TP** - True Positives
#
# **FN** - False Negatives
#
# **FP** - False Positives
#
# **TN** - True Negatives
#
# The **actual values** refer to the actual outcome of the matches. In our case, if CT have won this is termed a *positive* and if CT have lost this is termed a *negative*. The predicted values refer to the outcome predicted by the machine learning algorithm. Thus:
#
# * If a match is actually won by CT and our algorithm predicted the same, then that observation is a True Positive.
#
# * If a match is actually won by CT but our algorithm predicted that CT lost, then that observation is a False Negative.
#
# * If a match is actually lost by CT but our algorithm predicted that CT won, then that observation is a False Positive.
#
# * If a match is actually lost by CT and our algorithm predicted that CT have lost, then that observation is a True Negative.
#
# A perfect classifier would have only TPs and TNs in the confusion matrix and no FNs and FPs. Most of the time, this is not the case.
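# To make these definitions concrete, here is a small, made-up example (the labels below are invented and are not produced by any model in this notebook):
# In[ ]:
from sklearn.metrics import confusion_matrix

y_actual_toy = [1, 1, 0, 0, 1, 0, 1, 0]     # 1 = CT won, 0 = CT lost
y_predicted_toy = [1, 0, 0, 0, 1, 1, 1, 0]  # hypothetical classifier output

# Rows are actual classes and columns are predicted classes, so the layout is
# [[TN, FP], [FN, TP]]; here TP = 3, FN = 1, FP = 1, TN = 3.
print(confusion_matrix(y_actual_toy, y_predicted_toy))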
# ## Model threshold
# Most of the popular ML models do not just output 1 or 0 (meaning that CT have won or lost) given a set of features $\mathbb{X}$. Rather, they output a **probability**. Recall, that a binary classifier is just a probability model that:
#
# $$ f(\mathbb{X}) = P(\mathbb{Y} = 1| \mathbb{X}) \in (0, 1)$$
#
# So the output of the algorithm can be 0.0148, 0.5897, 0.998 and so on. By default, a label of 1 (CT winning a match) is given to an observation when $f(\mathbb{X}) \geqslant 0.5$. In other words, the threshold **t** = 0.5. In general terms:
#
# $$ y_{predicted} = \begin{cases} 1, & f(\mathbb{X}) \geqslant t \\
# 0, & f(\mathbb{X}) < t \end{cases} t \in (0, 1)$$
#
# Although it is generally advised to keep the default threshold of 0.5, in some cases a user can vary the threshold to achieve better results.
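# To make this concrete, the sketch below applies two different thresholds to a handful of made-up predicted probabilities (the numbers are purely illustrative):
# In[ ]:
import numpy as np

predicted_probabilities = np.array([0.0148, 0.41, 0.5897, 0.73, 0.998])

# Default threshold t = 0.5
print((predicted_probabilities >= 0.5).astype(int))  # [0 0 1 1 1]
# A stricter threshold t = 0.7 labels fewer observations as CT wins
print((predicted_probabilities >= 0.7).astype(int))  # [0 0 0 1 1]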
# ## Receiver operating characteristic curve (ROC)
# A receiver operating characteristic curve, or **ROC** curve, is a graphical plot that illustrates the performance of a binary classifier as the threshold is varied. It is a 2D plot where the X axis is the **False Positive Rate (FPR)** and the Y axis is the **True Positive Rate (TPR)**. FPR and TPR are defined as follows:
#
# $$FPR = \dfrac{FP}{N}$$
#
# $$TPR = \dfrac{TP}{P}$$
#
# Here **FP** - number of false positives generated by the classifier, **TP** - number of true positives generated by the classifier and **N** and **P** are the total number of "negative" and "positive" class observations in the data respectively.
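# Using the made-up confusion matrix from the example above (TP = 3, FN = 1, FP = 1, TN = 3), these rates are simple ratios:
# In[ ]:
TP, FN, FP, TN = 3, 1, 1, 3
P = TP + FN  # total positives
N = FP + TN  # total negatives
print("TPR:", TP / P)  # 0.75
print("FPR:", FP / N)  # 0.25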
#
# An example ROC plot:
#
# 
#
# Notice that the axis values are in the interval **[0, 1]**. Although it may not look like it, the orange curve is made up of many points which are connected to make a line (hence the term "curve"). Every point was obtained using a different threshold **t**. We always want a classifier whose ROC curve spikes as much as possible towards the top left corner. The closer the curve is to the bottom right corner, the worse the classifier.
#
# If the curve shoots up rapidly, that means that by adjusting the threshold by a little bit, the true positive rate (the share of "positive" class observations identified correctly) becomes very high while the errors that our model makes remain minimal (FPR is near zero). Further adjusting the threshold may increase the total number of positive class observations identified, but it will come at the cost of increasing the FPR.
#
# To put everything in an interactive way, please watch the video by the great StatQuest team about ROC curves: https://www.youtube.com/watch?v=4jRBRDbJemM
#
# Another great resource on this topic: https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc
# ## AUC statistic
# The area under the curve (AUC) statistic is the integral of a given ROC curve between the points (0,0) and (1,1):
#
# 
#
# The perfect estimator has an area under the curve of 1.0; a bad estimator has a value of 0.5 or below. In practice, a classifier with an AUC statistic above 0.8 is considered good and an AUC above 0.9 is considered very good.
#
# For the objective of creating an ML model for the winner of a CSGO match, we will use the AUC statistic as the main measure of the "goodness" of the model.
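# As a small, self-contained sketch (with invented labels and scores, not model output), this is how the ROC curve points and the AUC statistic are computed with scikit-learn:
# In[ ]:
from sklearn.metrics import roc_auc_score, roc_curve

y_true_toy = [0, 0, 1, 1]
y_score_toy = [0.1, 0.4, 0.35, 0.8]  # invented predicted probabilities

fpr_toy, tpr_toy, thresholds_toy = roc_curve(y_true_toy, y_score_toy)
print(fpr_toy, tpr_toy, thresholds_toy)
print(roc_auc_score(y_true_toy, y_score_toy))  # 0.75 for this toy example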
# # Creating the train, validation and test sets
# When creating machine learning models it is strongly advised to split the data into **train**, **validation** and **test** sets. A good general rule of thumb is to use ~80% of the data to train the algorithm, ~10% of the data for parameter tuning and ~10% of the data only for the final performance metric calculation.
#
# All of these datasets are needed to make sure that our model does not **overfit**.
# ## Overfitting problem
# As stated beautifully in the book "Introduction to Statistical Learning"{cite}`stat_learning`:
#
# **"When we overfit the training data, the test performance metrics will be very large because the supposed patterns that the method found in the training data simply don’t exist in the test data. Note that regardless of whether or not overfitting has occurred, we almost always expect the training errors to be smaller than the test errors because most statistical learning methods either directly or indirectly seek to minimize the training errors"**
#
# In other words, if we only use training data when creating ML models, we are blinded a bit and do not know how will the model perform with unseen data.
#
# As per {cite}`train_val_test`:
#
# **"The training set the largest corpus of your dataset that you reserve for training your model. After training, inference on these images will be taken with a grain of salt, since the model has already had a chance to look at and memorize the correct output."**
#
# **"The validation set is a separate section of your dataset that you will use during training to get a sense of how well your model is doing on images that are not being used in training. During training, it is common to report validation metrics continually after each training epoch <\...\>. You use these metrics to get a sense of when your model has hit the best performance it can reach on your validation set. You may choose to cease training at this point <\...\> As you work on your model, you can continually iterate on your dataset, image augmentations, and model design to increase your model's performance on the validation set."**
#
# **"After all of the training experiments have concluded, you probably have gotten a sense on how your model might do on the validation set. But it is important to remember that the validation set metrics may have influenced you during the creation of the model, and in this sense you might, as a designer, overfit the new model to the validation set. Because the validation set is heavily used in model creation, it is important to hold back a completely separate stronghold of data - the test set. You can run evaluation metrics on the test set at the very end of your project, to get a sense of how well your model will do in production."**
# ## Feature list
# After the feature engineering steps and EDA we can define the final feature list which we will use in our models:
# In[19]:
# Initial list
features = [
'bomb_planted',
'ct_health_share',
'ct_players_alive',
't_players_alive',
'ct_defuse_kit_present',
'ct_helmets',
't_helmets'
]
# **NOTE:** some of the features will be left out because of iterative inspection of model results and EDA.
# In[20]:
# Creating dummy vars for the map feature
map_df = pd.get_dummies(d['map'])
# Map feature names
map_features = map_df.columns.values.tolist()
# Concatenating the map_df to original dataframe
d = pd.concat([d, map_df], axis=1)
# Adding the map features to the original feature list
#features += map_features
# In[21]:
# Creating dummy vars for the team_score_diff features
score_df = pd.get_dummies(d['team_score_diff'])
# Score feature names
score_df.columns = [f"team_score_diff_in_{str(x)}" for x in score_df.columns]
score_features = score_df.columns.values.tolist()
# Concatenating the map_df to original dataframe
d = pd.concat([d, score_df], axis=1)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
import os
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (NumPy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_columns: The new index to set this
"""
self._query_compiler.columns = new_columns
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented(
"'Not' nodes are not implemented."
) # pragma: no cover
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
def drop_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(DataFrame, self).drop_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
def duplicated(self, subset=None, keep="first"):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Args:
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns:
Series
"""
import hashlib
df = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
if len(df.columns) > 1:
hashed = df.apply(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = df
duplicates = hashed.apply(lambda s: s.duplicated(keep=keep)).squeeze(axis=1)
# remove Series name which was assigned automatically by .apply
duplicates.name = None
return duplicates
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, type(self).__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
dropna: bool = True,
):
"""
Apply a groupby to this DataFrame. See _groupby() remote task.
Parameters
----------
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups
Returns
-------
A new DataFrame resulting from the groupby.
"""
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
dropna=dropna,
)
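# --- Illustrative usage of groupby() (not part of the original source) ---
# A hedged sketch using plain pandas (whose API this class mirrors); the column
# names are made up for the example.
import pandas
_df = pandas.DataFrame({"team": ["a", "a", "b"], "pts": [3, 1, 2]})
print(_df.groupby("team").sum())                        # pts: a -> 4, b -> 2
print(_df.groupby("team", as_index=False)["pts"].mean())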
def _reduce_dimension(self, query_compiler):
return Series(query_compiler=query_compiler)
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, copy=False, *args):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=False):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if sort is False:
warnings.warn(
"Due to https://github.com/pandas-dev/pandas/issues/35092, "
"Pandas ignores sort=False; Modin correctly does not sort."
)
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Series becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = pandas.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif isinstance(other, list):
if not all(isinstance(o, BasePandasDataset) for o in other):
other = DataFrame(pandas.DataFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(other.index)
if not isinstance(other, list)
else self.index.append([o.index for o in other])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
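# --- Illustrative usage of append() (not part of the original source) ---
# A hedged sketch with plain pandas; note that DataFrame.append() was removed in
# pandas 2.0, so this assumes an older pandas (or this pandas-mirroring class).
import pandas
_df = pandas.DataFrame({"a": [1, 2]})
_row = pandas.Series({"a": 3}, name=2)
print(_df.append(_row))                         # rows 0, 1, 2
print(_df.append({"a": 4}, ignore_index=True))  # dicts require ignore_index=True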
def assign(self, **kwargs):
df = self.copy()
for k, v in kwargs.items():
if callable(v):
df[k] = v(df)
else:
df[k] = v
return df
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(DataFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "DataFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "DataFrame":
"""
Compare to another DataFrame and show the differences.
Parameters
----------
other : DataFrame
Object to compare with.
align_axis : {0 or 'index', 1 or 'columns'}, default 1
Determine which axis to align the comparison on.
* 0, or 'index' : Resulting differences are stacked vertically
with rows drawn alternately from self and other.
* 1, or 'columns' : Resulting differences are aligned horizontally
with columns drawn alternately from self and other.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
"""
return self._default_to_pandas(
pandas.DataFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
Notes
-----
Correlation floating point precision may slightly differ from pandas.
For now only the Pearson method is implemented; other methods default to pandas.
"""
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
min_periods=min_periods,
)
)
def corrwith(self, other, axis=0, drop=False, method="pearson"):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop, method=method
)
def cov(self, min_periods=None, ddof: Optional[int] = 1):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
Notes
-----
Covariance floating point precision may slightly differ from pandas.
If the DataFrame contains at least one NA/null value, this method defaults to pandas.
"""
numeric_df = self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if all(numeric_df.notna().all()):
if min_periods is not None and min_periods > len(numeric_df):
result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))
result.fill(np.nan)
return numeric_df.__constructor__(result)
else:
cols = numeric_df.columns
idx = cols.copy()
numeric_df = numeric_df.astype(dtype="float64")
denom = 1.0 / (len(numeric_df) - ddof)
means = numeric_df.mean(axis=0)
result = numeric_df - means
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_df._query_compiler.cov(min_periods=min_periods)
is_notna = False
if is_notna:
result = numeric_df.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_df.__constructor__(query_compiler=result)
return result
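# --- Illustrative usage of cov() (not part of the original source) ---
# A minimal sketch with plain pandas; the expected numbers assume ddof=1.
import pandas
_df = pandas.DataFrame({"x": [1.0, 2.0, 3.0], "y": [2.0, 4.0, 6.0]})
print(_df.cov())   # var(x)=1.0, cov(x, y)=2.0, var(y)=4.0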
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
"""
if isinstance(other, BasePandasDataset):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindex(index=common)._query_compiler
if isinstance(other, DataFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".format(self.shape, other.shape)
)
if len(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
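# --- Illustrative usage of dot() (not part of the original source) ---
# A hedged sketch with plain pandas: the other object's index must match this
# frame's columns, as the docstring above explains.
import pandas
_df = pandas.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
_s = pandas.Series([10, 1], index=["a", "b"])
print(_df.dot(_s))   # 0 -> 12, 1 -> 34
_m = pandas.DataFrame([[1, 0], [0, 1]], index=["a", "b"])
print(_df.dot(_m))   # identity matrix -> unchanged values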
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Modin DataFrame to simplify logic below
other = DataFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).all().all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_pandas(
pandas.DataFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordiv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordiv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_dict`")
return from_pandas(
pandas.DataFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_records`")
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""
Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
output. By default, this is 100.
memory_usage (bool, str, optional): Specifies whether the total memory
usage of the DataFrame elements (including index) should be displayed.
True always show memory usage. False never shows memory usage. A value
of 'deep' is equivalent to "True with deep introspection". Memory usage
is shown in human-readable units (base-2 representation). Without deep
introspection a memory estimation is made based in column dtype and
number of rows assuming values consume the same memory amount for
corresponding dtypes. With deep memory introspection, a real memory
usage calculation is performed at the cost of computational resources.
Defaults to True.
null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns
-------
Prints the summary of a DataFrame and returns None.
"""
def put_str(src, output_len=None, spaces=2):
src = str(src)
return src.ljust(output_len if output_len else len(src)) + " " * spaces
def format_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._summary()
columns = self.columns
columns_len = len(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.format(dtype, count) for dtype, count in dtypes.value_counts().items()])}"
if max_cols is None:
max_cols = 100
exceeds_info_cols = columns_len > max_cols
if buf is None:
buf = sys.stdout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We're gonna take items from `non_null_count` in a loop, which
# works kinda slow with `Modin.Series`, that's why we call `_to_pandas()` here
# that will be faster.
non_null_count = self.count()._to_pandas()
if memory_usage is None:
memory_usage = True
def get_header(spaces=2):
output = []
head_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengths = {}
lengths["head"] = max(len(head_label), len(pprint_thing(len(columns))))
lengths["column"] = max(
len(column_label), max(len(pprint_thing(col)) for col in columns)
)
lengths["dtype"] = len(dtype_label)
dtype_spaces = (
max(lengths["dtype"], max(len(pprint_thing(dtype)) for dtype in dtypes))
- lengths["dtype"]
)
header = put_str(head_label, lengths["head"]) + put_str(
column_label, lengths["column"]
)
if null_counts:
lengths["null"] = max(
len(null_label),
max(len(pprint_thing(x)) for x in non_null_count)
+ len(non_null_label),
)
header += put_str(null_label, lengths["null"])
header += put_str(dtype_label, lengths["dtype"], spaces=dtype_spaces)
output.append(header)
delimiters = put_str(delimiter * lengths["head"]) + put_str(
delimiter * lengths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengths["null"])
delimiters += put_str(delimiter * lengths["dtype"], spaces=dtype_spaces)
output.append(delimiters)
return output, lengths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {len(columns)} columns):"
header, lengths = get_header()
output.extend([columns_line, *header])
for i, col in enumerate(columns):
i, col, dtype = map(pprint_thing, [i, col, dtypes[col]])
to_append = put_str(" {}".format(i), lengths["head"]) + put_str(
col, lengths["column"]
)
if null_counts:
non_null = pprint_thing(non_null_count[col])
to_append += put_str(
"{} non-null".format(non_null), lengths["null"]
)
to_append += put_str(dtype, lengths["dtype"], spaces=0)
output.append(to_append)
def non_verbose_repr(output):
output.append(columns._summary(name="Columns"))
if verbose:
verbose_repr(output)
else:
non_verbose_repr(output)
output.append(dtypes_line)
if memory_usage:
deep = memory_usage == "deep"
mem_usage_bytes = self.memory_usage(index=True, deep=deep).sum()
mem_line = f"memory usage: {format_size(mem_usage_bytes)}"
output.append(mem_line)
output.append("")
buf.write("\n".join(output))
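# --- Illustrative usage of info() (not part of the original source) ---
# A minimal sketch with plain pandas; `buf` captures the summary instead of
# writing to sys.stdout, and memory_usage="deep" triggers real introspection.
import io
import pandas
_df = pandas.DataFrame({"a": [1, 2, None], "b": ["x", "y", "z"]})
_buf = io.StringIO()
_df.info(buf=_buf, memory_usage="deep")
print(_buf.getvalue())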
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if isinstance(value, Series):
# TODO: Remove broadcast of Series
value = value._to_pandas()
if not self._query_compiler.lazy_execution and len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif len(self.columns) == 0 and loc == 0:
new_query_compiler = DataFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not isinstance(value, pandas.Series)
and len(value) != len(self.index)
):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
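# --- Illustrative usage of insert() (not part of the original source) ---
# A minimal sketch with plain pandas; insert() works in place and the new column
# must match the length of the index.
import pandas
_df = pandas.DataFrame({"a": [1, 2], "c": [5, 6]})
_df.insert(1, "b", [3, 4])   # columns become a, b, c
print(_df)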
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
def iterrow_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
def items_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
def itertuples_builder(s):
return next(s._to_pandas().to_frame().T.itertuples(index=index, name=name))
partition_iterator = PartitionIterator(self, 0, itertuples_builder)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""
Join two or more DataFrames, or a DataFrame with a collection.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one.
If a Series is passed, its name attribute must be set,
and that will be used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in other,
otherwise joins index-on-index. If multiple values given,
the other DataFrame must have a MultiIndex. Can pass an array as the join key
if it is not already contained in the calling DataFrame.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
- left: use calling frame's index (or column if on is specified)
- right: use other's index.
- outer: form union of calling frame's index (or column if on is specified)
with other's index, and sort it lexicographically.
- inner: form intersection of calling frame's index (or column if on is specified)
with other's index, preserving the order of the calling frame's index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key.
If False, the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and other.
"""
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if on is not None:
return self.__constructor__(
query_compiler=self._query_compiler.join(
other._query_compiler,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
new_columns = (
pandas.DataFrame(columns=self.columns)
.join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
other = [other]
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
new_columns = (
pandas.DataFrame(columns=self.columns)
.join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
new_frame = DataFrame(
query_compiler=self._query_compiler.concat(
1, [obj._query_compiler for obj in other], join=how, sort=sort
)
)
new_frame.columns = new_columns
return new_frame
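# --- Illustrative usage of join() (not part of the original source) ---
# A hedged sketch with plain pandas; `on="key"` joins the caller's column against
# the other frame's index, and the suffixes resolve the overlapping "v" column.
import pandas
_left = pandas.DataFrame({"key": ["k0", "k1"], "v": [1, 2]})
_right = pandas.DataFrame({"v": [10, 20]}, index=["k0", "k1"])
print(_left.join(_right, on="key", lsuffix="_l", rsuffix="_r"))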
def le(self, other, axis="columns", level=None):
return self._binary_op(
"le", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
return self._binary_op(
"lt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""
Return the median of the values for the requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a particular level,
collapsing into a Series.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use everything,
then use only numeric data. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or DataFrame (if level specified)
The median of the values for the requested axis
"""
axis = self._get_axis_number(axis)
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
if level is not None:
return self.__constructor__(
query_compiler=self._query_compiler.median(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
)
return self._reduce_dimension(
self._query_compiler.median(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
"""
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
Parameters
----------
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
ignore_index : bool, default True
If True, original index is ignored. If False, the original index is retained.
Index labels will be repeated as necessary.
Returns
-------
DataFrame
Unpivoted DataFrame.
"""
return self.__constructor__(
query_compiler=self._query_compiler.melt(
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
if index:
result = self._reduce_dimension(
self._query_compiler.memory_usage(index=False, deep=deep)
)
index_value = self.index.memory_usage(deep=deep)
return Series(index_value, index=["Index"]).append(result)
return super(DataFrame, self).memory_usage(index=index, deep=deep)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on columns,
the DataFrame indexes will be ignored. Otherwise if joining indexes on indexes or
indexes on a column or columns, the index will be passed on.
Parameters
----------
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
- left: use only keys from left frame,
similar to a SQL left outer join; preserve key order.
- right: use only keys from right frame,
similar to a SQL right outer join; preserve key order.
- outer: use union of keys from both frames,
similar to a SQL full outer join; sort keys lexicographically.
- inner: use intersection of keys from both frames,
similar to a SQL inner join; preserve the order of the left keys.
on : label or list
Column or index level names to join on.
These must be found in both DataFrames. If on is None and not merging on indexes
then this defaults to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame.
Can also be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame.
Can also be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s).
If it is a MultiIndex, the number of keys in the other DataFrame
(either the index or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame.
If False, the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right side, respectively.
To raise an exception on overlapping columns use (False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with information
on the source of each row. If string, column with information on source of each row
will be added to output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right' DataFrame,
and "both" if the observation’s merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
- 'one_to_one' or '1:1': check if merge keys are unique in both left and right datasets.
- 'one_to_many' or '1:m': check if merge keys are unique in left dataset.
- 'many_to_one' or 'm:1': check if merge keys are unique in right dataset.
- 'many_to_many' or 'm:m': allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
"""
if isinstance(right, Series):
if right.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
right = right.to_frame()
if not isinstance(right, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(right)} was passed"
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
return self.__constructor__(
query_compiler=self._query_compiler.merge(
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
)
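# --- Illustrative usage of merge() (not part of the original source) ---
# A minimal sketch with plain pandas; indicator=True adds a "_merge" column that
# records where each key was found.
import pandas
_left = pandas.DataFrame({"key": ["a", "b"], "x": [1, 2]})
_right = pandas.DataFrame({"key": ["b", "c"], "y": [3, 4]})
print(_left.merge(_right, on="key", how="inner"))                 # single row: b
print(_left.merge(_right, on="key", how="outer", indicator=True))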
def mod(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mod",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def mul(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mul",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
rmul = multiply = mul
def ne(self, other, axis="columns", level=None):
return self._binary_op(
"ne", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def nlargest(self, n, columns, keep="first"):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
"""
return DataFrame(query_compiler=self._query_compiler.nlargest(n, columns, keep))
def nsmallest(self, n, columns, keep="first"):
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
Returns
-------
DataFrame
"""
return DataFrame(
query_compiler=self._query_compiler.nsmallest(
n=n, columns=columns, keep=keep
)
)
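# --- Illustrative usage of nlargest()/nsmallest() (not part of the original source) ---
# A minimal sketch with plain pandas; the column names are made up.
import pandas
_df = pandas.DataFrame({"pop": [30, 10, 20], "area": [3, 1, 2]}, index=["x", "y", "z"])
print(_df.nlargest(2, "pop"))              # rows x and z
print(_df.nsmallest(1, ["pop", "area"]))   # row y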
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
axis : int or str
Shift direction.
Returns
-------
shifted : same type as caller
"""
if periods == 0:
return self.copy()
if axis == "index" or axis == 0:
if abs(periods) >= len(self.index):
return DataFrame(columns=self.columns)
else:
if periods > 0:
new_index = self.index.drop(labels=self.index[:periods])
new_df = self.drop(self.index[-periods:])
else:
new_index = self.index.drop(labels=self.index[periods:])
new_df = self.drop(self.index[:-periods])
new_df.index = new_index
return new_df
else:
if abs(periods) >= len(self.columns):
return DataFrame(index=self.index)
else:
if periods > 0:
new_columns = self.columns.drop(labels=self.columns[:periods])
new_df = self.drop(self.columns[-periods:], axis="columns")
else:
new_columns = self.columns.drop(labels=self.columns[periods:])
new_df = self.drop(self.columns[:-periods], axis="columns")
new_df.columns = new_columns
return new_df
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
"""
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
return self._reduce_dimension(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
else:
return DataFrame(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame.
Parameters
----------
index : str or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : str or object
Column to use to make new frame's columns.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
"""
return self.__constructor__(
query_compiler=self._query_compiler.pivot(
index=index, columns=columns, values=values
)
)
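# --- Illustrative usage of pivot() (not part of the original source) ---
# A minimal sketch with plain pandas; each (index, columns) pair must be unique,
# otherwise pivot_table() below is the right tool.
import pandas
_df = pandas.DataFrame({"date": ["d1", "d1", "d2"], "city": ["A", "B", "A"], "t": [10, 12, 11]})
print(_df.pivot(index="date", columns="city", values="t"))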
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
):
result = DataFrame(
query_compiler=self._query_compiler.pivot_table(
index=index,
values=values,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
)
return result
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs,
):
return self._to_pandas().plot
def pow(self, other, axis="columns", level=None, fill_value=None):
if isinstance(other, Series):
return self._default_to_pandas(
"pow", other, axis=axis, level=level, fill_value=fill_value
)
return self._binary_op(
"pow",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return the product of the values for the requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a particular level,
collapsing into a Series.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use everything,
then use only numeric data. Not implemented for Series.
min_count : int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result will be NA.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or DataFrame (if level specified)
The product of the values for the requested axis.
"""
axis = self._get_axis_number(axis)
axis_to_apply = self.columns if axis else self.index
if (
skipna is not False
and numeric_only is None
and min_count > len(axis_to_apply)
):
new_index = self.columns if not axis else self.index
return Series(
[np.nan] * len(new_index), index=new_index, dtype=np.dtype("object")
)
data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
if level is not None:
return data.__constructor__(
query_compiler=data._query_compiler.prod_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
if min_count > 1:
return data._reduce_dimension(
data._query_compiler.prod_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
product = prod
radd = add
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = | validate_bool_kwarg(inplace, "inplace") | pandas.util._validators.validate_bool_kwarg |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined; values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
""" Indexer for raman data files """
import hashlib
from typing import List
# get_directory_paths_for_run_mode
# from .index_selection import index_selection
import logging
import sys
from pathlib import Path
import pandas as pd
from raman_fitting.config.filepath_helper import get_directory_paths_for_run_mode
# parse_filepath_to_sid_and_pos
from raman_fitting.indexing.filename_parser import index_dtypes_collection
from raman_fitting.indexing.filename_parser_collector import make_collection
# from raman_fitting.utils._dev_sqlite_db import df_to_db_sqlalchemy
# from .. import __package_name__
logger = logging.getLogger(__name__)
logger.propagate = False
__all__ = ["MakeRamanFilesIndex"]
#%%
class MakeRamanFilesIndex:
"""
Finds the Raman data files in the data folder from config and creates an overview
of them on the attribute .index.
"""
# index_file_sample_cols = {'FileStem': 'string',
# 'SampleID': 'string',
# 'SamplePos': 'int64',
# 'SampleGroup': 'string',
# 'FilePath': 'string')
# index_file_stat_cols = ('FileCreationDate' , 'FileCreation','FileModDate', 'FileMod', 'FileHash')
# INDEX_FILE_NAME = 'index.csv'
debug = False
table_name = "ramanfiles"
# RESULTS_DIR = config.RESULTS_DIR,
# DATASET_DIR = config.DATASET_DIR,
# INDEX_FILE = config.INDEX_FILE,
def __init__(
self, force_reload=True, run_mode="normal", dataset_dirs=None, **kwargs
):
self._cqnm = self.__class__.__qualname__
self._kwargs = kwargs
self.force_reload = force_reload
self.run_mode = run_mode
if not dataset_dirs:
dataset_dirs = get_directory_paths_for_run_mode(run_mode=self.run_mode)
self.dataset_dirs = dataset_dirs
for k, val in self.dataset_dirs.items():
if isinstance(val, Path):
setattr(self, k, val)
# if val.is_dir() or val.is_file():
self.raman_files = self.find_files(data_dir=self.DATASET_DIR)
self.index = pd.DataFrame()
self._error_parse_filenames = []
if "normal" in run_mode and not self.debug and not self.force_reload:
self.index = self.load_index()
else:
self.index = self.reload_index()
self.index_selection = self.index_selection(self.index, **self._kwargs)
@staticmethod
def find_files(data_dir: Path = Path()) -> List:
"""
Creates a list of all raman type files found in the DATASET_DIR which are used in the creation of the index.
"""
if not isinstance(data_dir, Path):
logger.warning(f"find_files warning: arg is not Path.")
return []
raman_files_raw = []
if data_dir.exists():
# rglob returns a generator, which is always truthy; materialize it so the
# "empty directory" warning below can actually fire.
RFs = list(data_dir.rglob("*txt"))
if RFs:
raman_files_raw = [
i
for i in RFs
if "fail" not in i.stem and "Labjournal" not in str(i)
]
logger.info(
f"find_files {len(raman_files_raw)} files were found in the chosen data dir:\n\t{data_dir}"
)
else:
logger.warning(
f"find_files warning: the chose data file dir was empty.\n{data_dir}\mPlease choose another directory which contains your data files."
)
else:
logger.warning(
f"find_files warning: the chosen data file dir does not exists.\n{data_dir}\nPlease choose an existing directory which contains your data files."
)
return raman_files_raw
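# --- Illustrative usage of find_files() (not part of the original source) ---
# A hedged sketch, intended to run after this module has been imported:
# find_files is a staticmethod, so it can be called without constructing the
# whole index; the directory below is only a placeholder.
_example_dir = Path("/tmp/raman_data")
_found = MakeRamanFilesIndex.find_files(data_dir=_example_dir)
print(len(_found), "candidate .txt files found")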
def make_index(self):
"""loops over the files and scrapes the index data from each file"""
raman_files = self.raman_files
pp_collection = make_collection(raman_files, **self._kwargs)
index = pd.DataFrame([i.parse_result for i in pp_collection])
index = self._extra_assign_destdir_and_set_paths(index)
logger.info(
f"{self._cqnm} successfully made index {len(index)} from {len(raman_files)} files"
)
if self._error_parse_filenames:
logger.info(
f"{self._cqnm} errors for filename parser {len(self._error_parse_filenames)} from {len(raman_files)} files"
)
return index
def _extra_assign_destdir_and_set_paths(self, index: pd.DataFrame):
"""assign the DestDir column to index and sets column values as object type"""
if hasattr(index, "SampleGroup"):
index = index.assign(
**{
"DestDir": [
self.RESULTS_DIR.joinpath(sGrp)
for sGrp in index.SampleGroup.to_numpy()
]
}
)
_path_dtypes_map = {
k: val for k, val in index_dtypes_collection.items() if "Path" in val
}
for k, val in _path_dtypes_map.items():
if hasattr(index, k):
if "Path" in val:
index[k] = [Path(i) for i in index[k].to_numpy()]
return index
def export_index(self, index):
"""saves the index to a defined Index file"""
if not index.empty:
if not self.INDEX_FILE.parent.exists():
logger.info(
f"{self._cqnm} created parent dir: {self.INDEX_FILE.parent}"
)
self.INDEX_FILE.parent.mkdir(exist_ok=True, parents=True)
index.to_csv(self.INDEX_FILE)
_dtypes = index.dtypes.to_frame("dtypes")
_dtypes.to_csv(self._dtypes_filepath())
# self.save_merge_to_db(DB_filepath, index, self.table_name)
logger.info(
f"{self._cqnm} Succesfully Exported Raman Index file to:\n\t{self.INDEX_FILE}\nwith len({len(index)})."
)
else:
logger.info(f"{self._cqnm} Empty index not exported")
def load_index(self):
"""loads the index from from defined Index file"""
if self.INDEX_FILE.exists():
try:
_dtypes = pd.read_csv(self._dtypes_filepath(), index_col=[0]).to_dict()[
"dtypes"
]
_dtypes_datetime = {
k: val
for k, val in _dtypes.items()
if "datetime" in val or k.endswith("Date")
}
_dtypes_no_datetime = {
k: val
for k, val in _dtypes.items()
if k not in _dtypes_datetime.keys()
}
index = pd.read_csv(
self.INDEX_FILE,
index_col=[0],
dtype=_dtypes_no_datetime,
parse_dates=list(_dtypes_datetime.keys()),
)
index = self._extra_assign_destdir_and_set_paths(index)
logger.info(
f"Succesfully imported Raman Index file from {self.INDEX_FILE}, with len({len(index)})"
)
if not len(index) == (
len(self.raman_files) + len(self._error_parse_filenames)
):
logger.error(
f"""'Error in load_index from {self.INDEX_FILE},
\nlength of loaded index not same as number of raman files
\n starting reload index ... """
)
index = self.reload_index()
except Exception as e:
logger.error(
f"Error in load_index from {self.INDEX_FILE},\n{e}\n starting reload index ... "
)
index = self.reload_index()
else:
logger.error(
f"Error in load_index: {self.INDEX_FILE} does not exists, starting reload index ... "
)
index = self.reload_index()
return index
def reload_index(self):
"""restarts the index creation from scratch and export."""
logger.info(f"{self._cqnm} starting reload index.")
index = pd.DataFrame()
try:
logger.info(f"{self._cqnm} making index.")
try:
index = self.make_index()
except Exception as e:
logger.error(f"{self._cqnm} make index error:\n\t{e}")
try:
self.export_index(index)
except Exception as e:
logger.error(f"{self._cqnm} export after make index error:\n\t{e}")
except Exception as e:
logger.error(f"{self._cqnm} reload index error:\n\t{e}")
return index
def index_selection(
self, index= | pd.DataFrame() | pandas.DataFrame |
import pyverdict
import argparse
import logging
import os
import time
import pandas as pd
import pickle
import re
parser = argparse.ArgumentParser()
parser.add_argument('-s','-sampling_ratio',help='sampling ratio',dest='sampling_ratio',required=True)
args = parser.parse_args()
print(args.sampling_ratio)
sampling_ratio = args.sampling_ratio
if not os.path.exists('../../output/verdict/instacart-1000-{}'.format(sampling_ratio)):
# logger.info('creating directory Accuracy')
os.makedirs('../../output/verdict/instacart-1000-{}'.format(sampling_ratio))
if __name__=='__main__':
print("main executing")
with open('../../input/instacart_queries/queries-test-1000.pkl', 'rb') as f:
queries = pickle.load(f)
verdict = pyverdict.postgres('127.0.0.1',5433,dbname='instacart',user='analyst',password='<PASSWORD>')
verdict.sql("DROP ALL SCRAMBLE public.order_products")
verdict.sql("DROP ALL SCRAMBLE public.orders")
verdict.sql("""CREATE SCRAMBLE IF NOT EXISTS public.order_products_instacart_x
FROM public.order_products SIZE {}""".format(sampling_ratio))
verdict.sql("""CREATE SCRAMBLE IF NOT EXISTS public.orders_instacart_x
FROM public.orders SIZE {}""".format(sampling_ratio))
# print(res)
# sys.exit(0)
query_answers_dic = {}
query_answers_dic['query_name'] = []
query_answers_dic['time'] = []
query_names = {}
i = 0
regex_orders = re.compile(r"orders", re.IGNORECASE)
regex_order_products = re.compile(r"order_products", re.IGNORECASE)
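# Rewrite each query so that plain table names hit the scramble tables created above
# (orders -> orders_instacart_x, order_products -> order_products_instacart_x).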
for qname,q in queries:
print(q)
q = regex_orders.sub("orders_instacart_x",q)
q = regex_order_products.sub("order_products_instacart_x",q)
print("Changed Query :")
print(q)
print("================================")
start = time.time()
try:
res_df_v = verdict.sql(q)
except Exception as e:
print("Query {} not supported".format(qname))
print(e)
i += 1
continue  # skip unsupported queries so a stale or undefined res_df_v is never pickled
end = time.time()-start
res_df_v.to_pickle('../../output/verdict/instacart-1000-{}/{}.pkl'.format(sampling_ratio,i))
if qname not in query_names:
query_names[qname] = [i]
else:
query_names[qname].append(i)
query_answers_dic['time'].append(end)
query_answers_dic['query_name'].append(qname)
i+=1
verdict.close()
qa = | pd.DataFrame(query_answers_dic) | pandas.DataFrame |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
# -*- coding: utf-8 -*-
import unittest
import platform
import sdc
import numpy as np
import pandas as pd
import glob
import gc
import re
import pyarrow.parquet as pq
from sdc.str_arr_ext import StringArray
from sdc.str_ext import unicode_to_std_str, std_str_to_unicode
from sdc.tests.gen_test_data import ParquetGenerator
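# The tests below exercise plain Python strings under @sdc.jit as well as the
# StringArray extension type, string Series boxing and the std-string regex helpers.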
class TestStrings(unittest.TestCase):
def test_pass_return(self):
def test_impl(_str):
return _str
hpat_func = sdc.jit(test_impl)
# pass single string and return
arg = 'test_str'
self.assertEqual(hpat_func(arg), test_impl(arg))
# pass string list and return
arg = ['test_str1', 'test_str2']
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_const(self):
def test_impl():
return 'test_str'
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_str2str(self):
str2str_methods = ['capitalize', 'casefold', 'lower', 'lstrip',
'rstrip', 'strip', 'swapcase', 'title', 'upper']
for method in str2str_methods:
func_text = "def test_impl(_str):\n"
func_text += " return _str.{}()\n".format(method)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = sdc.jit(test_impl)
arg = ' \tbbCD\t '
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_equality(self):
arg = 'test_str'
def test_impl(_str):
return (_str == 'test_str')
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_impl(_str):
return (_str != 'test_str')
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_concat(self):
def test_impl(_str):
return (_str + 'test_str')
hpat_func = sdc.jit(test_impl)
arg = 'a_'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_split(self):
def test_impl(_str):
return _str.split('/')
hpat_func = sdc.jit(test_impl)
arg = 'aa/bb/cc'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_replace(self):
def test_impl(_str):
return _str.replace('/', ';')
hpat_func = sdc.jit(test_impl)
arg = 'aa/bb/cc'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_getitem_int(self):
def test_impl(_str):
return _str[3]
hpat_func = sdc.jit(test_impl)
arg = 'aa/bb/cc'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_string_int_cast(self):
def test_impl(_str):
return int(_str)
hpat_func = sdc.jit(test_impl)
arg = '12'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_string_float_cast(self):
def test_impl(_str):
return float(_str)
hpat_func = sdc.jit(test_impl)
arg = '12.2'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_string_str_cast(self):
def test_impl(a):
return str(a)
hpat_func = sdc.jit(test_impl)
for arg in [np.int32(45), 43, np.float32(1.4), 4.5]:
py_res = test_impl(arg)
h_res = hpat_func(arg)
# XXX: use startswith since hpat output can have extra characters
self.assertTrue(h_res.startswith(py_res))
def test_re_sub(self):
def test_impl(_str):
p = re.compile('ab*')
return p.sub('ff', _str)
hpat_func = sdc.jit(test_impl)
arg = 'aabbcc'
self.assertEqual(hpat_func(arg), test_impl(arg))
def test_regex_std(self):
def test_impl(_str, _pat):
return sdc.str_ext.contains_regex(
_str, sdc.str_ext.compile_regex(_pat))
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func('What does the fox say',
r'd.*(the |fox ){2}'), True)
self.assertEqual(hpat_func('What does the fox say', r'[kz]u*'), False)
def test_replace_regex_std(self):
def test_impl(_str, pat, val):
s = unicode_to_std_str(_str)
e = sdc.str_ext.compile_regex(unicode_to_std_str(pat))
val = unicode_to_std_str(val)
out = sdc.str_ext.str_replace_regex(s, e, val)
return std_str_to_unicode(out)
hpat_func = sdc.jit(test_impl)
_str = 'What does the fox say'
pat = r'd.*(the |fox ){2}'
val = 'does the cat '
self.assertEqual(
hpat_func(_str, pat, val),
_str.replace(re.compile(pat).search(_str).group(), val)
)
def test_replace_noregex_std(self):
def test_impl(_str, pat, val):
s = unicode_to_std_str(_str)
e = unicode_to_std_str(pat)
val = unicode_to_std_str(val)
out = sdc.str_ext.str_replace_noregex(s, e, val)
return std_str_to_unicode(out)
hpat_func = sdc.jit(test_impl)
_str = 'What does the fox say'
pat = 'does the fox'
val = 'does the cat'
self.assertEqual(
hpat_func(_str, pat, val),
_str.replace(pat, val)
)
# string array tests
def test_string_array_constructor(self):
# create StringArray and return as list of strings
def test_impl():
return StringArray(['ABC', 'BB', 'CDEF'])
hpat_func = sdc.jit(test_impl)
self.assertTrue(np.array_equal(hpat_func(), ['ABC', 'BB', 'CDEF']))
def test_string_array_comp(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'CDEF'])
B = A == 'ABC'
return B.sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), 1)
def test_string_series(self):
def test_impl(ds):
rs = ds == 'one'
return ds, rs
hpat_func = sdc.jit(test_impl)
df = pd.DataFrame(
{
'A': [1, 2, 3] * 33,
'B': ['one', 'two', 'three'] * 33
}
)
ds, rs = hpat_func(df.B)
gc.collect()
self.assertTrue(isinstance(ds, pd.Series) and isinstance(rs, pd.Series))
self.assertTrue(ds[0] == 'one' and ds[2] == 'three' and rs[0] and not rs[2])
def test_string_array_bool_getitem(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'CDEF'])
B = A == 'ABC'
C = A[B]
return len(C) == 1 and C[0] == 'ABC'
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), True)
def test_string_NA_box(self):
# create `example.parquet` file
ParquetGenerator.gen_pq_test()
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.five
hpat_func = sdc.jit(test_impl)
# XXX just checking isna() since Pandas uses None in this case
# instead of nan for some reason
np.testing.assert_array_equal(hpat_func().isna(), test_impl().isna())
# test utf8 decode
def test_decode_empty1(self):
def test_impl(S):
return S[0]
hpat_func = sdc.jit(test_impl)
S = pd.Series([''])
self.assertEqual(hpat_func(S), test_impl(S))
def test_decode_single_ascii_char1(self):
def test_impl(S):
return S[0]
hpat_func = sdc.jit(test_impl)
S = | pd.Series(['A']) | pandas.Series |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inventory holding helper function of building visualization for report."""
# pylint: disable-msg=wrong-import-position
import re
import os
from typing import Set, Dict, List
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
from ml_eda.metadata import run_metadata_pb2
FIGURE_SIZE = (10, 8)
XLABEL_SIZE = 10
def _trim_xlabel(xlabels: List[str]) -> List[str]:
return [item[0:XLABEL_SIZE] if len(item) > XLABEL_SIZE else item
for item in xlabels]
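# e.g. with XLABEL_SIZE = 10: _trim_xlabel(["very_long_category", "short"]) -> ["very_long_", "short"]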
def plot_bar_chart(
analysis: run_metadata_pb2.Analysis,
figure_base_path: str) -> str:
"""Create histogram for numerical attributes or bar chart for categorical
Args:
analysis: (run_metadata_pb2.Analysis), the analysis should be one of
the following
- HISTOGRAM for histogram of numerical attribute
- VALUE_COUNTS for bar chart of categorical attributes
figure_base_path: (string), the folder for holding figures
Returns:
string, path of the generated figure
"""
# pylint: disable-msg=too-many-locals
supported_analysis = {
run_metadata_pb2.Analysis.HISTOGRAM,
run_metadata_pb2.Analysis.VALUE_COUNTS
}
assert analysis.name in supported_analysis
# The result of supported analysis should be in the format of TableMetric
table_metric = analysis.tmetrics[0]
attribute_name = analysis.features[0].name
columns = []
if analysis.name == run_metadata_pb2.Analysis.HISTOGRAM:
boundaries = table_metric.column_indexes
for item in boundaries:
# For better display, the midpoint of a bin is computed
boundary = re.findall(r"\d+\.?\d*", item)
if len(boundary) == 1:
center = boundary[0] + '+'
else:
left, right = boundary
center = "{0:.2f}".format((float(left) + float(right)) / 2)
columns.append(center)
else:
columns.extend(table_metric.column_indexes)
# Trim the xlabel to make it look nicer
columns = _trim_xlabel(columns)
for row in table_metric.rows:
row_values = [item.value for item in row.cells]
df = | pd.DataFrame({'bin_name': columns, "Frequency": row_values}) | pandas.DataFrame |
import fileinput
import pandas as pd
import numpy as np
import drep
import os
import shutil
import json
import re
from PyPDF2 import PdfFileReader
from .dprint import dprint
from .config import _globals
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
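# compute.ops_on_diff_frames lets Koalas combine Series/DataFrames that originate from
# different anchors (different Spark plans); it is enabled for this whole test class in
# setUpClass and reset again in tearDownClass above.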
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
# a pandas bug?
# assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
# assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
assert_eq(
(kser1 | kser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(kser1 & kser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
kdf5 = ks.from_pandas(pdf5)
kdf6 = ks.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([kdf1, kdf2.C], [pdf1, pdf2.C]),
([kdf1.A, kdf2], [pdf1.A, pdf2]),
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
([kdf3[("X", "A")], kdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([kdf3, kdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([kdf3[("X", "A")], kdf4], [pdf3[("X", "A")], pdf4]),
([kdf3, kdf4], [pdf3, pdf4]),
([kdf5, kdf6], [pdf5, pdf6]),
([kdf6, kdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ks.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
kdf.insert(0, "a", kser)
pdf.insert(0, "a", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf.insert(0, ("b", "c", ""), kser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
def test_different_columns(self):
kdf1 = self.kdf1
kdf4 = self.kdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
kdf4.columns = columns
pdf4.columns = columns
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["c"] = self.kdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf.columns = columns
pdf.columns = columns
kdf[("y", "c")] = self.kdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
kdf = ks.from_pandas(pdf)
kdf.index.name = None
kdf["NEW"] = ks.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' does not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["b", "c"]] = self.kdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' and 'd' do not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["c", "d"]] = self.kdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf.columns = columns
pdf.columns = columns
kdf[[("y", "c"), ("z", "d")]] = self.kdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf1 = ks.from_pandas(self.pdf1)
pdf1 = self.pdf1
kdf1.columns = columns
pdf1.columns = columns
kdf[["c", "d"]] = kdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["a"] = self.kdf1.a
pdf["a"] = self.pdf1.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
kdf["d"] = self.kdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
kdf[["e", "f"]] = self.kdf3
pdf[["e", "f"]] = self.pdf3
kdf[["b", "c"]] = self.kdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
kdf5 = self.kdf5
kdf6 = self.kdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((kdf5.c - kdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((kdf5["c"] / kdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((kdf5 + kdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["x"] = self.kdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["e"] = self.kdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["c"] = self.kdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["c"]] = self.kdf5
pdf[["c"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["x"]] = self.kdf5
pdf[["x"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf6)
pdf = self.pdf6
kdf[["x", "y"]] = self.kdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf_orig = ks.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
another_kdf = ks.DataFrame(pdf_orig)
kdf.loc[["viper", "sidewinder"], ["shield"]] = -another_kdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -another_kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ks.DataFrame(pdf)
another_kdf = ks.DataFrame(pdf)
kdf.iloc[[0, 1, 2], 1] = -another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (2,1)",
# ):
# kdf.iloc[[1, 2], [1]] = -another_kdf.max_speed
kdf.iloc[[0, 1, 2], 1] = 10 * another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (1,)",
# ):
# kdf.iloc[[0], 1] = 10 * another_kdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.loc[kser % 2 == 1] = -kser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[["viper", "sidewinder"]] = -kser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser1 = pser + 1
kser1 = kser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.iloc[[0, 1, 2]] = -kser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[1, 2]] = -kser_another
kser.iloc[[0, 1, 2]] = 10 * kser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[0]] = 10 * kser_another
kser1.iloc[[0, 1, 2]] = -kser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser1, pser1)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser1.iloc[[1, 2]] = -kser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
piloc = pser.iloc
kiloc = kser.iloc
kiloc[[0, 1, 2]] = -kser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[1, 2]] = -kser_another
kiloc[[0, 1, 2]] = 10 * kser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[0]] = 10 * kser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
pser.update(pd.Series([4, 5, 6]))
kser.update(ks.Series([4, 5, 6]))
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), kdf1.where(kdf2 < -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 > -250), kdf1.mask(kdf2 > -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
def test_multi_index_column_assignment_frame(self):
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
kdf = ks.DataFrame(pdf)
kdf["c"] = ks.Series([10, 20, 30, 20])
pdf["c"] = pd.Series([10, 20, 30, 20])
kdf[("d", "x")] = ks.Series([100, 200, 300, 200], name="1")
pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
kdf[("d", "y")] = ks.Series([1000, 2000, 3000, 2000], name=("1", "2"))
pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
kdf["e"] = ks.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
kdf[[("f", "x"), ("f", "y")]] = ks.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
self.assert_eq(repr(kdf.sort_index()), repr(pdf))
with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
kdf[("1", "2", "3")] = ks.Series([100, 200, 300, 200])
def test_series_dot(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
kser_other = ks.Series([90, 91, 85], index=[1, 2, 4])
pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
# length of index is different
kser_other = ks.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
kser.dot(kser_other)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
pser = pd.Series([0, 1, 2, 3])
kser = ks.from_pandas(pser)
# DataFrame "other" without Index/MultiIndex as columns
pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with Index as columns
pdf.columns = pd.Index(["x", "y"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf.columns = pd.Index(["x", "y"], name="cols_name")
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf = pdf.reindex([1, 0, 2, 3])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with MultiIndex as columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 9 21:23:40 2019
@authors: <EMAIL>
Last modified: 2019-11-24
------------------------------------------------------
** Semantic Search Analysis: Maintain Match Files **
------------------------------------------------------
Update things like removing dupes that sneak in over time, punctuation, resorting...
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
File locations, etc.
'''
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis, show
import matplotlib.ticker as mtick # used for example in 100-percent bars chart
import numpy as np
import os
import re
import string
import requests
import json
import lxml.html as lh
from lxml.html import fromstring
# Set working directory and directories for read/write
home_folder = os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
#%%
# ========================================================================
# To update SiteSpecificMatches.xlsx, such as punctuation, removing dupes
# ========================================================================
SiteSpecificMatches = pd.read_excel('data/matchFiles/SiteSpecificMatches.xlsx')
# Altmetric JSON documentation
# https://docs.google.com/spreadsheets/d/1ndVY8Q2LOaZO_P_HDmSQulagjeUrS250mAL2N5V8GvY/edit#gid=0
from Bio import Entrez
from dotenv import load_dotenv
import pandas as pd
import os
import time
import re, string
import requests
from bs4 import BeautifulSoup
import sys
import ast
import numpy as np
from urllib.parse import urlparse
import datetime
from datetime import datetime
import pycountry
load_dotenv(verbose=True)
PTH = os.environ.get('PTH_A')
Entrez.email = os.environ.get('EMAIL')
API_KEY = os.environ.get('API_KEY')
start = str(sys.argv[1])
end = str(sys.argv[2])
s = start.replace("/","")
en = end.replace("/","")
# Articles that cite a given article ONLY covers journals indexed for PubMed Central
# (https://www.ncbi.nlm.nih.gov/pmc/tools/cites-citedby/)
# http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc137
def who_cited(pmids):
# get pmc_ids
results = Entrez.read(Entrez.elink(dbfrom="pubmed", db="pmc",
api_key = API_KEY,
usehistory ='y',
retmax = 10000000,
LinkName="pubmed_pmc_refs", id=pmids))
citations = []
for link in results:
if len(link['LinkSetDb']) > 0:
citations.append(len(link['LinkSetDb'][0]["Link"]))
else:
citations.append(0)
return(citations)
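# Hedged usage sketch (defined only, never called): illustrates the expected
# call pattern for who_cited() above. The PMIDs below are placeholders, a
# network connection and a valid NCBI API key are required, and the result is
# one citation count per input PMID, aligned by position.
def _example_who_cited():
    example_pmids = ["12345678", "23456789"]  # hypothetical PMIDs
    counts = who_cited(example_pmids)
    return dict(zip(example_pmids, counts))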
def pritify(df):
print('pritify...')
df['Article.ELocationID'] = df['Article.ELocationID'].str.strip("']")
df['Article.ELocationID'] = df['Article.ELocationID'].str.strip("['")
df['Article.ELocationID'] = df['Article.ELocationID'].str.strip("['")
return(df)
def Altmetric(pmids):
if len(pmids) == 0:
return(pd.DataFrame())
Frame = pd.DataFrame()
i=0
for pmid in pmids:
try:
i = i+1
print('Altmetric', i, 'out of', len(pmids))
url = 'https://api.altmetric.com/v1/pmid/'+str(pmid)
r = requests.get(url)
time.sleep(1) # rate limit is 1 per sec
f = pd.json_normalize(r.json())
Frame = Frame.append(f, ignore_index=True)
except:
print("no results")
if len(Frame) == 0:
Frame.at[0,'pmid'] = pmid
return(Frame)
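# Hedged usage sketch (defined only, never called): Altmetric() queries
# https://api.altmetric.com/v1/pmid/<pmid> one PMID at a time and sleeps 1 s
# per request to respect the rate limit noted above. The PMID below is a
# placeholder and a network connection is required.
def _example_altmetric():
    altmetric_df = Altmetric(["12345678"])  # hypothetical PMID
    # One row per PMID known to Altmetric; merged later on the 'pmid' column.
    return altmetric_df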
def keep_first_link(df):
pattern = re.compile(r"[^a-zA-Z0-9-//?.]+")
links=[]
# keep only first link
for i in range(0,len(df)):
print(i)
url = df.iloc[i]['tool_URL'].split(",")[0]
url = url[url.find("http"):]
index = url.rfind('.')
url = url[:index+1]+pattern.sub('', url[index:])
url = url.strip(".")
links.append(url)
return(links)
def reorder_colmns(df):
columns = [
"Tool_URL",
"url_status",
"Tool_Name",
"Tool_Description",
"PMID",
"DOI",
"Article_Title",
"Abstract",
"Author_Information",
"Publication_Type",
"Grant_List",
"Chemical_List",
"Citations",
"Article_Language",
"KeywordList",
"Last_Updated",
"Added_On",
"Published_On",
"Journal",
"ISSN",
"Journal_Title",
"Altmetric_Score",
"Readers_Count",
"Readers_in_Mendeley",
"Readers_in_Connotea",
"Readers_in_Citeulike",
"Cited_By_Posts_Count",
"Twitter_accounts_that_tweeted_this_publication",
"Users_who_mentioned_the_publication_on_Twitter",
"Scientists_who_mentioned_the_publication_on_Twitter",
"News_sources_that_mentioned_the_publication",
"Mentions_in_social_media",
"Facebook_Shares"
]
# add columns that are missing
for col in columns:
if col not in df.columns:
df[col]=''
return(df[columns])
def test_tool_name(df):
df['is_tool'] = 1
for i in range(len(df)):
name = df.iloc[i]['Tool_Name']
print(i)
url = 'https://bio.tools/api/tool/'+name+'?format=json'
response = requests.get(url, timeout=20)
f = response.content
dict_str = f.decode("UTF-8")
if not isinstance(dict_str, str):
mydata = ast.literal_eval(dict_str)
if mydata['detail'] == 'Not found.':
df.at[i,'is_tool'] = 0
return(df)
def clean_tool_name(df):
df = df[df['Tool_Name'].notna()]
df = df.reset_index(drop=True)
for i in range(len(df['Tool_Name'])):
name = df.iloc[i]['Tool_Name']
if (len(name) < 3 or len(name) > 20) or ('www' in name) or ('http' in name) or (name.replace(".","").isdigit() or ("." in name)):
url = df.iloc[i]['Tool_URL']
if str(url) != 'nan':
p = urlparse(url)
p = p.path.strip("/")
name = p.split("/")[-1]
else:
df.at[i,'Tool_Name'] = "NA"
continue
if len(name.replace(".","")) > 2:
for w in ['.jsp','.php','.html','.css','.gz','.gzip','.git','.htm','.zip','.exe', '.js', '.asp','version','.pl','.aspx', '.xls', '.jar','.py']:
name = name.replace(w,"")
df.at[i,'Tool_Name'] = name
for w in ['supplement','english','resource','datalist','software','article', 'index', 'india','softwar', 'algorithm', 'markov', 'RNA-Seq','pubmed','covid']:
if name.lower() in w:
df.at[i,'Tool_Name'] = "NA"
else:
df.at[i,'Tool_Name'] = "NA"
if len(name) > 30:
df.at[i,'Tool_Name'] = "NA"
for w in ['dna','rna',]:
if w == name.lower():
df.at[i,'Tool_Name'] = "NA"
df = df[df['Tool_Name']!="NA"]
return(df)
def remove_tools(df):
# remove tools with the same names but different url
x = df[df.duplicated(['Tool_Name'])]
y = x[x.duplicated(['Tool_URL'])]['PMID'].tolist()
x = x[~x['PMID'].isin(y)]['PMID'].tolist()
df = df[~df['PMID'].isin(x)]
# remove tools with long/short names
df = df[(df['Tool_Name'].str.len() > 2) & (df['Tool_Name'].str.len() < 31)]
df = df.reset_index(drop=True)
# remove tools with the same PMID (keep first one)
df = df.drop_duplicates(subset='PMID', keep="first")
return(df)
def get_country(author_list):
countries = []
country_names = [country.name.lower() for country in pycountry.countries ]
country_names.append("usa")
country_names.append('Korea')
country_names.append(' uk ')
country_names.append(' uk.')
country_names.append('United Arab Emirates')
country_names.append('Saudi Arabia')
country_names.append('Taiwan')
for affil in author_list:
text = str(affil)
country = [country for country in country_names if country in text.lower()]
if len(country) > 0:
countries.append(country[0])
else:
countries.append('')
countries=[c.replace(".","") for c in countries]
countries = [ c.strip() for c in countries ]
countries=[c.capitalize() for c in countries]
for i in range(len(countries)):
if countries[i].lower() in ["usa", " uk ", " uk"]:
countries[i] = countries[i].upper()
if countries[i].lower() == "united kingdom" or countries[i].lower() == "uk":
countries[i] = "UK"
if countries[i].lower() == "united states":
countries[i] = "USA"
return(countries)
def read_data(fpath):
try:
return(pd.read_csv(fpath,dtype=str))
except:
print("No tools were detected for",start)
sys.exit()
if __name__=='__main__':
# load tools
tools = read_data(os.path.join(PTH,'data/classified_tools_'+s+'_'+en+'.csv'))
tools = tools[pd.notna(tools['tool_URL'])]
tools['tool_URL'] = keep_first_link(tools)
tools['num_citations'] = who_cited(tools['PMID'].tolist())
tools = pritify(tools)
Altmetric_dataframe = Altmetric(tools['PMID'].tolist())
tools['PMID'] = tools['PMID'].astype(str)
tools['PMID'] = [ pmid.replace('.0','') for pmid in tools['PMID'] ]
Altmetric_dataframe['pmid'] = Altmetric_dataframe['pmid'].astype('str')
# merge the Altmetric dataframe with the tools dataframe. Keep all data from tools
tools1 = tools.merge(Altmetric_dataframe, left_on='PMID', right_on='pmid',how='left')
meta = pd.read_csv('https://raw.githubusercontent.com/MaayanLab/BioToolStory/master/PubMedTools/CF/data/tool_meta.csv')
# keep only columns in tools1
cl = set(meta['old_name'].tolist()).intersection(tools1.columns)
# reorder columns
tools1 = tools1[list(cl)]
# rename columns!!!!!
for i in range(0,len(meta)):
tools1.rename(columns={meta['old_name'][i]: meta['new_name'][i]}, inplace=True)
# reorder tools names
tools1 = reorder_colmns(tools1)
tools1 = tools1[~tools1['Tool_URL'].isna()]
tools1['Tool_URL'] = [ x.replace("..",".") for x in tools1['Tool_URL'] ]
tools1['Tool_Name'] = [ BeautifulSoup(str(x), "lxml").text for x in tools1['Tool_Name'] ]
# test tools against bio.tools api
tools1 = test_tool_name(tools1)
tools1 = tools1[pd.notna(tools1['Abstract'])]
import click as ck
import numpy as np
import pandas as pd
from collections import Counter
from utils import Ontology, FUNC_DICT, NAMESPACES, MOLECULAR_FUNCTION, BIOLOGICAL_PROCESS, CELLULAR_COMPONENT
import logging
logging.basicConfig(level=logging.INFO)
@ck.command()
@ck.option(
'--go-file', '-gf', default='data/go.obo',
help='Gene Ontology file in OBO Format')
@ck.option(
'--data-file', '-df', default='data/swissprot_exp_zero_10.pkl',
help='Uniprot KB, generated with uni2pandas.py')
@ck.option(
'--sim-file', '-sf', default='data/swissprot_exp.sim',
help='Sequence similarity generated with Diamond')
def main(go_file, data_file, sim_file):
go = Ontology(go_file, with_rels=True)
logging.info('GO loaded')
df = pd.read_pickle(data_file)
proteins = set(df['proteins'].values)
print("DATA FILE" ,len(df))
logging.info('Processing annotations')
annotations = list()
for ont in ['mf', 'bp', 'cc']:
cnt = Counter()
iprs = Counter()
index = []
for i, row in enumerate(df.itertuples()):
ok = False
for term in row.zero_annotations:
if go.get_namespace(term) == NAMESPACES[ont]:
cnt[term] += 1
ok = True
for ipr in row.interpros:
iprs[ipr] += 1
if ok:
index.append(i)
del cnt[FUNC_DICT[ont]] # Remove top term
tdf = df.iloc[index]
terms = list(cnt.keys())
interpros = list(iprs.keys())
print(f'Number of {ont} terms {len(terms)}')
print(f'Number of {ont} iprs {len(iprs)}')
print(f'Number of {ont} proteins {len(tdf)}')
terms_df = pd.DataFrame({'gos': terms})
import logging
import pandas as pd
from easysparql import easysparqlclass
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.api.types import CategoricalDtype
from tadaqq.util import compute_scores
PRINT_DIFF = True
def get_logger(name, level=logging.INFO):
logger = logging.getLogger(name)
formatter = logging.Formatter('%(name)-12s>> %(message)s')
# formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if not logger.hasHandlers():
logger.addHandler(handler)
logger.setLevel(level)
return logger
logger = get_logger(__name__, level=logging.INFO)
# logger = get_logger(__name__, level=logging.DEBUG)
esparql = easysparqlclass.EasySparql(cache_dir=".cache", logger=logger)
# def compute_scores(eval_data, k=1):
# """
# """
# corr = 0
# incorr = 0
# notf = 0
# for d in eval_data:
# if d == -1:
# notf += 1
# elif d <= k:
# corr += 1
# elif d < 1:
# err_msg = "Error: compute_scores> Invalid k <%s>" % str(d)
# print(err_msg)
# raise Exception(err_msg)
# else:
# incorr += 1
# if corr == 0:
# prec = 0
# rec = 0
# f1 = 0
# else:
# prec = corr / (corr+incorr)
# rec = corr / (corr+notf)
# f1 = 2*prec*rec / (prec+rec)
# # print("#corr: %d\t#incorr: %d\t#notf: %d" % (corr, incorr, notf))
# return prec, rec, f1
# # print("Precision: %.2f\nRecall: %.2f\nF1: %.2f" % (prec, rec, f1))
def get_num_rows(fdir):
df = pd.read_csv(fdir)
return len(df.index)
def compute_scores_per_key(eval_pp, fname=None, print_scores=False):
"""
eval_pp: dict
For example (property as a key)
{
"generic property": [1,... ] (k values),
}
"""
lines = []
print("\n\n| %15s | %15s | %15s | %5s |" % ("Key", "Precision", "Recall", "F1"))
print("|:%s:|:%s:|:%s:|:%s:|" % ("-"*15,"-"*15,"-"*15,"-"*5,))
for p in eval_pp:
prec, rec, f1 = compute_scores(eval_pp[p])
lines.append([p, 'prec', prec])
lines.append([p, 'rec', rec])
lines.append([p, 'f1', f1])
# if PRINT_DIFF:
# print("%s: \n\t%f1.2\t%f1.2\t%f1.2" % (p, prec, rec, f1))
if print_scores:
print("| %15s | %15.2f | %15.2f | %5.2f| " % (p, prec, rec, f1))
if fname:
generate_diagram(lines, fname)
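# Hedged usage sketch (illustrative only): the property names and rank lists
# below are made up to show the input shape compute_scores_per_key() expects,
# where each value is a list of ranks and -1 marks "not found".
def _example_compute_scores_per_key():
    eval_pp = {
        "generic property": [1, 2, -1, 1],
        "specific property": [1, 1, 3],
    }
    compute_scores_per_key(eval_pp, fname=None, print_scores=True)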
def generate_diagram(acc, draw_fname):
"""
:param acc: list of [key, metric, value] rows, as produced by compute_scores_per_key
:param draw_fname: output file name for the diagram
:return: None
"""
data = pd.DataFrame(acc, columns=['Property Concept', 'Metric', 'Value'])
"""
.. module:: volatility
:synopsis: Volatility Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin
class AverageTrueRange(IndicatorMixin):
"""Average True Range (ATR)
The indicator provides an indication of the degree of price volatility.
Strong moves, in either direction, are often accompanied by large ranges,
or large True Ranges.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_true_range_atr
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
cs = self._close.shift(1)
tr = self._true_range(self._high, self._low, cs)
atr = np.zeros(len(self._close))
atr[self._n-1] = tr[0:self._n].mean()
for i in range(self._n, len(atr)):
atr[i] = (atr[i-1] * (self._n-1) + tr.iloc[i]) / float(self._n)
self._atr = pd.Series(data=atr, index=tr.index)
def average_true_range(self) -> pd.Series:
"""Average True Range (ATR)
Returns:
pandas.Series: New feature generated.
"""
atr = self._check_fillna(self._atr, value=0)
return pd.Series(atr, name='atr')
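# Hedged usage sketch (not executed on import): shows how AverageTrueRange is
# meant to be driven from OHLC columns; the column names "High"/"Low"/"Close"
# are assumptions about the caller's DataFrame, not requirements of this module.
def _example_average_true_range(ohlc: pd.DataFrame) -> pd.Series:
    indicator = AverageTrueRange(high=ohlc["High"], low=ohlc["Low"],
                                 close=ohlc["Close"], n=14, fillna=False)
    return indicator.average_true_range()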
class BollingerBands(IndicatorMixin):
"""Bollinger Bands
https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_bands
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
ndev(int): n factor standard deviation
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 20, ndev: int = 2, fillna: bool = False):
self._close = close
self._n = n
self._ndev = ndev
self._fillna = fillna
self._run()
def _run(self):
min_periods = 0 if self._fillna else self._n
self._mavg = self._close.rolling(self._n, min_periods=min_periods).mean()
self._mstd = self._close.rolling(self._n, min_periods=min_periods).std(ddof=0)
self._hband = self._mavg + self._ndev * self._mstd
self._lband = self._mavg - self._ndev * self._mstd
def bollinger_mavg(self) -> pd.Series:
"""Bollinger Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
mavg = self._check_fillna(self._mavg, value=-1)
return pd.Series(mavg, name='mavg')
def bollinger_hband(self) -> pd.Series:
"""Bollinger Channel High Band
Returns:
pandas.Series: New feature generated.
"""
hband = self._check_fillna(self._hband, value=-1)
return pd.Series(hband, name='hband')
def bollinger_lband(self) -> pd.Series:
"""Bollinger Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
lband = self._check_fillna(self._lband, value=-1)
return pd.Series(lband, name='lband')
def bollinger_wband(self) -> pd.Series:
"""Bollinger Channel Band Width
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_width
Returns:
pandas.Series: New feature generated.
"""
wband = ((self._hband - self._lband) / self._mavg) * 100
wband = self._check_fillna(wband, value=0)
return pd.Series(wband, name='bbiwband')
def bollinger_pband(self) -> pd.Series:
"""Bollinger Channel Percentage Band
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_perce
Returns:
pandas.Series: New feature generated.
"""
pband = (self._close - self._lband) / (self._hband - self._lband)
pband = self._check_fillna(pband, value=0)
return pd.Series(pband, name='bbipband')
def bollinger_hband_indicator(self) -> pd.Series:
"""Bollinger Channel Indicator Crossing High Band (binary).
It returns 1, if close is higher than bollinger_hband. Else, it returns 0.
Returns:
pandas.Series: New feature generated.
"""
hband = pd.Series(np.where(self._close > self._hband, 1.0, 0.0), index=self._close.index)
hband = self._check_fillna(hband, value=0)
return pd.Series(hband, index=self._close.index, name='bbihband')
def bollinger_lband_indicator(self) -> pd.Series:
"""Bollinger Channel Indicator Crossing Low Band (binary).
It returns 1, if close is lower than bollinger_lband. Else, it returns 0.
Returns:
pandas.Series: New feature generated.
"""
lband = pd.Series(np.where(self._close < self._lband, 1.0, 0.0), index=self._close.index)
lband = self._check_fillna(lband, value=0)
return pd.Series(lband, name='bbilband')
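# Hedged usage sketch (not executed on import): builds the three Bollinger
# bands from a close-price series. The 20-period window and 2-standard-deviation
# width mirror the defaults documented above; the column layout is an assumption.
def _example_bollinger_bands(close: pd.Series) -> pd.DataFrame:
    bb = BollingerBands(close=close, n=20, ndev=2)
    return pd.DataFrame({
        "mavg": bb.bollinger_mavg(),    # middle band (SMA)
        "hband": bb.bollinger_hband(),  # upper band (SMA + 2*std)
        "lband": bb.bollinger_lband(),  # lower band (SMA - 2*std)
    })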
class KeltnerChannel(IndicatorMixin):
"""KeltnerChannel
Keltner Channels are a trend following indicator used to identify reversals with channel breakouts and
channel direction. Channels can also be used to identify overbought and oversold levels when the trend
is flat.
https://school.stockcharts.com/doku.php?id=technical_indicators:keltner_channels
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
n_atr(int): n atr period. Only valid if ov param is False.
fillna(bool): if True, fill nan values.
ov(bool): if True, use original version as the centerline (SMA of typical price)
if False, use EMA of close as the centerline. More info:
https://school.stockcharts.com/doku.php?id=technical_indicators:keltner_channels
"""
def __init__(
self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 20, n_atr: int = 10,
fillna: bool = False, ov: bool = True):
self._high = high
self._low = low
self._close = close
self._n = n
self._n_atr = n_atr
self._fillna = fillna
self._ov = ov
self._run()
def _run(self):
min_periods = 1 if self._fillna else self._n
if self._ov:
self._tp = ((self._high + self._low + self._close) / 3.0).rolling(self._n, min_periods=min_periods).mean()
self._tp_high = (((4 * self._high) - (2 * self._low) + self._close) / 3.0).rolling(
self._n, min_periods=0).mean()
self._tp_low = (((-2 * self._high) + (4 * self._low) + self._close) / 3.0).rolling(
self._n, min_periods=0).mean()
else:
self._tp = self._close.ewm(span=self._n, min_periods=min_periods, adjust=False).mean()
atr = AverageTrueRange(
close=self._close, high=self._high, low=self._low, n=self._n_atr, fillna=self._fillna
).average_true_range()
self._tp_high = self._tp + (2*atr)
self._tp_low = self._tp - (2*atr)
def keltner_channel_mband(self) -> pd.Series:
"""Keltner Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
tp = self._check_fillna(self._tp, value=-1)
return pd.Series(tp, name='mavg')
def keltner_channel_hband(self) -> pd.Series:
"""Keltner Channel High Band
Returns:
pandas.Series: New feature generated.
"""
tp = self._check_fillna(self._tp_high, value=-1)
return pd.Series(tp, name='kc_hband')
def keltner_channel_lband(self) -> pd.Series:
"""Keltner Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
tp_low = self._check_fillna(self._tp_low, value=-1)
return pd.Series(tp_low, name='kc_lband')
def keltner_channel_wband(self) -> pd.Series:
"""Keltner Channel Band Width
Returns:
pandas.Series: New feature generated.
"""
wband = ((self._tp_high - self._tp_low) / self._tp) * 100
wband = self._check_fillna(wband, value=0)
return pd.Series(wband, name='bbiwband')
def keltner_channel_pband(self) -> pd.Series:
"""Keltner Channel Percentage Band
Returns:
pandas.Series: New feature generated.
"""
pband = (self._close - self._tp_low) / (self._tp_high - self._tp_low)
pband = self._check_fillna(pband, value=0)
return pd.Series(pband, name='bbipband')
def keltner_channel_hband_indicator(self) -> pd.Series:
"""Keltner Channel Indicator Crossing High Band (binary)
It returns 1, if close is higher than keltner_channel_hband. Else, it returns 0.
Returns:
pandas.Series: New feature generated.
"""
hband = pd.Series(np.where(self._close > self._tp_high, 1.0, 0.0), index=self._close.index)
hband = self._check_fillna(hband, value=0)
return pd.Series(hband, name='dcihband')
def keltner_channel_lband_indicator(self) -> pd.Series:
"""Keltner Channel Indicator Crossing Low Band (binary)
It returns 1, if close is lower than keltner_channel_lband. Else, it returns 0.
Returns:
pandas.Series: New feature generated.
"""
lband = pd.Series(np.where(self._close < self._tp_low, 1.0, 0.0), index=self._close.index)
lband = self._check_fillna(lband, value=0)
return pd.Series(lband, name='dcilband')
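# Hedged usage sketch (not executed on import): Keltner Channel bands using
# either the original centerline (SMA of typical price) or the EMA-of-close
# variant, selected via the `ov` flag described in the class docstring above.
# The OHLC column names are assumptions about the caller's DataFrame.
def _example_keltner_channel(ohlc: pd.DataFrame, use_original: bool = True) -> pd.DataFrame:
    kc = KeltnerChannel(high=ohlc["High"], low=ohlc["Low"], close=ohlc["Close"],
                        n=20, n_atr=10, ov=use_original)
    return pd.DataFrame({
        "kc_mband": kc.keltner_channel_mband(),
        "kc_hband": kc.keltner_channel_hband(),
        "kc_lband": kc.keltner_channel_lband(),
    })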
class DonchianChannel(IndicatorMixin):
"""Donchian Channel
https://www.investopedia.com/terms/d/donchianchannels.asp
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(
self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
n: int = 20,
offset: int = 0,
fillna: bool = False):
self._offset = offset
self._close = close
self._high = high
self._low = low
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._min_periods = 1 if self._fillna else self._n
self._hband = self._high.rolling(self._n, min_periods=self._min_periods).max()
self._lband = self._low.rolling(self._n, min_periods=self._min_periods).min()
def donchian_channel_hband(self) -> pd.Series:
"""Donchian Channel High Band
Returns:
pandas.Series: New feature generated.
"""
hband = self._check_fillna(self._hband, value=-1)
if self._offset != 0:
hband = hband.shift(self._offset)
return pd.Series(hband, name='dchband')
def donchian_channel_lband(self) -> pd.Series:
"""Donchian Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
lband = self._check_fillna(self._lband, value=-1)
if self._offset != 0:
lband = lband.shift(self._offset)
return pd.Series(lband, name='dclband')
def donchian_channel_mband(self) -> pd.Series:
"""Donchian Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
mband = ((self._hband - self._lband) / 2.0) + self._lband
mband = self._check_fillna(mband, value=-1)
if self._offset != 0:
mband = mband.shift(self._offset)
return pd.Series(mband, name='dcmband')
def donchian_channel_wband(self) -> pd.Series:
"""Donchian Channel Band Width
Returns:
pandas.Series: New feature generated.
"""
mavg = self._close.rolling(self._n, min_periods=self._min_periods).mean()
wband = ((self._hband - self._lband) / mavg) * 100
wband = self._check_fillna(wband, value=0)
if self._offset != 0:
wband = wband.shift(self._offset)
return pd.Series(wband, name='dcwband')
def donchian_channel_pband(self) -> pd.Series:
"""Donchian Channel Percentage Band
Returns:
pandas.Series: New feature generated.
"""
pband = (self._close - self._lband) / (self._hband - self._lband)
pband = self._check_fillna(pband, value=0)
if self._offset != 0:
pband = pband.shift(self._offset)
return pd.Series(pband, name='dcpband')
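# Hedged usage sketch (not executed on import): Donchian Channel band width and
# percentage band from OHLC columns; the column names are caller-side assumptions.
def _example_donchian_channel(ohlc: pd.DataFrame) -> pd.DataFrame:
    dc = DonchianChannel(high=ohlc["High"], low=ohlc["Low"], close=ohlc["Close"], n=20)
    return pd.DataFrame({
        "dcwband": dc.donchian_channel_wband(),  # (high band - low band) / SMA * 100
        "dcpband": dc.donchian_channel_pband(),  # position of close within the channel
    })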
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines
# pylint: disable=too-many-public-methods, unused-argument, redefined-builtin
from collections.abc import Iterable
from distutils.version import LooseVersion
import numbers
import pyarrow
from pandas.api.extensions import ExtensionDtype, ExtensionArray
from pandas.api.extensions import register_extension_dtype
import numpy as np
import pandas as pd
import arctern
@register_extension_dtype
class GeoDtype(ExtensionDtype):
type = object
name = "GeoDtype"
na_value = pd.NA
kind = 'O'
def __repr__(self):
return "GeoDtype"
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
if string == cls.name:
return cls()
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return GeoArray
def from_wkt(data):
"""
Convert a list or array of wkt formed string to a GeoArray.
:param data: array-like
list or array of wkt formed string
:return: GeoArray
"""
return GeoArray(arctern.ST_GeomFromText(data).values)
def to_wkt(data):
"""
Convert GeoArray or np.ndarray or list to a numpy string array of wkt formed string.
"""
if not isinstance(data, (GeoArray, np.ndarray, list)):
raise ValueError("'data' must be a GeoArray or np.ndarray or list.")
return np.asarray(arctern.ST_AsText(data), dtype=str)
def from_wkb(data):
"""
Convert a list or array of wkb objects to a GeoArray.
:param data: array-like
list or array of wkb objects
:return: GeoArray
"""
# pandas.infer_type can't infer custom ExtensionDtype
if not isinstance(getattr(data, "dtype", None), GeoDtype) and len(data) != 0:
from pandas.api.types import infer_dtype
inferred = infer_dtype(data, skipna=True)
if inferred in ("bytes", "empty"):
pass
else:
raise ValueError("'data' must be bytes type array or list.")
if not isinstance(data, np.ndarray):
array = np.empty(len(data), dtype=object)
array[:] = data
else:
array = data
mask = pd.isna(array)
array[mask] = None
return GeoArray(array)
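# Hedged usage sketch (defined only, never called): round-trips WKT strings
# through the helpers above. The geometries are arbitrary examples; arctern must
# be installed for ST_GeomFromText / ST_AsText to run.
def _example_wkt_roundtrip():
    wkt_strings = ["POINT (1 2)", "POINT (3 4)"]
    geo_array = from_wkt(wkt_strings)  # WKT strings -> GeoArray of WKB bytes
    return to_wkt(geo_array)           # back to a numpy array of WKT strings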
def is_geometry_array(data):
"""
Check if the data is array like, and dtype is `GeoDtype`.
"""
return isinstance(getattr(data, "dtype", None), GeoDtype)
def is_scalar_geometry(data):
"""
Check if the data is bytes dtype.
"""
return isinstance(data, bytes)
class GeoArray(ExtensionArray):
_dtype = GeoDtype()
def __init__(self, data):
if not isinstance(data, (np.ndarray, GeoArray)):
raise TypeError(
"'data' should be array of wkb formed bytes. Use from_wkt to construct a GeoArray.")
if not data.ndim == 1:
raise ValueError("'data' should be 1-dim array of wkb formed bytes.")
self.data = data
@property
def dtype(self):
return self._dtype
def __len__(self):
return self.shape[0]
@property
def shape(self):
return (self.size,)
@property
def size(self):
return self.data.size
@property
def ndim(self):
return len(self.shape)
@property
def nbytes(self):
return self.data.nbytes
def copy(self):
return GeoArray(self.data.copy())
def isna(self):
return np.array([g is None or g is np.nan for g in self.data], dtype=bool)
def fillna(self, value=None, method=None, limit=None):
from pandas.util._validators import validate_fillna_kwargs
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
from pandas.api.types import is_array_like, infer_dtype
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f"expected {len(self)}"
)
value = value[mask]
else:
# because pandas infer_dtype can't work on a scalar value, we put the value into a list
value = [value]
if mask.any():
if method is not None:
from pandas.core.missing import pad_1d
from pandas.core.missing import backfill_1d
func = pad_1d if method == "pad" else backfill_1d
new_values = func(self.astype(object), limit=limit, mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
# raise NotImplementedError("not support fillna with method")
else:
# translate value
if not isinstance(getattr(value, "dtype", value), (GeoDtype, type(None))):
inferred_type = infer_dtype(value, skipna=True)
if inferred_type == "string":
value = arctern.ST_GeomFromText(value)
elif inferred_type == "bytes":
pass
else:
raise ValueError(
"can only fillna with wkt formed string or wkb formed bytes")
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
def _bin_op(self, other, op):
def convert_values(values):
if isinstance(values, ExtensionArray) or pd.api.types.is_list_like(values):
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import linregress
from scipy.stats import median_abs_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
from meltcurves import melt_fromdebris_func
#%%% ===== SCRIPT OPTIONS =====
option_melt_comparison = False
option_hd_comparison = True
option_hd_centerline = False
option_hd_spatial_compare = False
hd_obs_fp = debris_prms.main_directory + '/../hd_obs/'
melt_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_melt_compare/'
hd_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_obs_compare/'
hd_centerline_fp = debris_prms.main_directory + '/../hd_obs/centerline_hd/'
if os.path.exists(melt_compare_fp) == False:
os.makedirs(melt_compare_fp)
if os.path.exists(hd_compare_fp) == False:
os.makedirs(hd_compare_fp)
#%% ===== FUNCTIONS =====
def plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn, ds_names=None,
hd_min=0, hd_max=2, hd_tick_major=0.25, hd_tick_minor=0.05,
melt_min=0, melt_max=70, melt_tick_major=10, melt_tick_minor=5,
plot_meltfactor=False, z_value = 1.645, fontsize=11):
#%%
""" Plot comparison of debris vs. melt for various sites """
# Dataset of melt data
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
color_dict = {0:'k', 1:'b', 2:'r'}
symbol_dict = {0:'D', 1:'o', 2:'^'}
# ===== PLOT DEBRIS VS. SURFACE LOWERING =====
fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False,
gridspec_kw = {'wspace':0.4, 'hspace':0.15})
melt_obs_all = []
hd_obs_all = []
melt_mod_all = []
melt_mod_bndlow_all = []
melt_mod_bndhigh_all = []
for n in np.arange(0,len(measured_hd_list)):
measured_hd = measured_hd_list[n]
measured_melt = measured_melt_list[n]
melt_obs_all.extend(measured_melt)
hd_obs_all.extend(measured_hd)
yearfracs = yearfracs_list[n]
start_yearfrac = yearfracs[0]
end_yearfrac = yearfracs[1]
if ds_names is not None:
ds_name = ds_names[n]
else:
ds_name = None
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_thicknesses = ds_ostrem.hd_cm.values
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
#%%
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment(melt_mod, melt_cleanice, melt_2cm, measured_hd)
melt_mod_all.extend(melt_mod)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow, measured_hd)
melt_mod_bndlow_all.extend(melt_mod_bndlow)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh, measured_hd)
melt_mod_bndhigh_all.extend(melt_mod_bndhigh)
if plot_meltfactor:
melt_4curve = melt_4curve / melt_cleanice
melt_4curve_bndlow = melt_4curve_bndlow / melt_cleanice
melt_4curve_bndhigh = melt_4curve_bndhigh / melt_cleanice
# Plot curve
ax[0,0].plot(measured_hd, measured_melt, symbol_dict[n], color=color_dict[n],
markersize=3, markerfacecolor="None", markeredgewidth=0.5, zorder=5, label=ds_name, clip_on=False)
ax[0,0].plot(debris_4curve, melt_4curve,
color=color_dict[n], linewidth=1, linestyle='--', zorder=5-n)
ax[0,0].fill_between(debris_4curve, melt_4curve_bndlow, melt_4curve_bndhigh,
color=color_dict[n], linewidth=0, zorder=5-n, alpha=0.2)
# text
# ax[0,0].text(0.5, 1.09, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
ax[0,0].text(0.5, 1.11, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
transform=ax[0,0].transAxes)
# eqn_text = r'$b = \frac{b_{0}}{1 + kb_{0}h}$'
# coeff1_text = r'$b_{0} = ' + str(np.round(func_coeff[0],2)) + '$'
# coeff2_text = r'$k = ' + str(np.round(func_coeff[1],2)) + '$'
# # coeff$\frac{b_{0}}{1 + 2kb_{0}h}$'
# ax[0,0].text(0.9, 0.95, eqn_text, size=12, horizontalalignment='right', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.615, 0.83, 'where', size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.77, coeff1_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.7, coeff2_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# X-label
# ax[0,0].set_xlabel('Debris thickness (m)', size=fontsize)
ax[0,0].set_xlim(hd_min, hd_max)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(hd_tick_major))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(hd_tick_minor))
# Y-label
# if plot_meltfactor:
# ylabel_str = 'Melt (-)'
# else:
# ylabel_str = 'Melt (mm w.e. d$^{-1}$)'
# ax[0,0].set_ylabel(ylabel_str, size=fontsize)
ax[0,0].set_ylim(melt_min, melt_max)
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(melt_tick_major))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(melt_tick_minor))
# Tick parameters
ax[0,0].yaxis.set_ticks_position('both')
ax[0,0].tick_params(axis='both', which='major', labelsize=fontsize-2, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=fontsize-4, direction='in')
# Legend
ax[0,0].legend(ncol=1, fontsize=fontsize-3, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.5, borderpad=0.25, labelspacing=0.5, framealpha=0.5)
# Save plot
fig.set_size_inches(2, 1.5)
fig.savefig(melt_compare_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
plt.close()
return hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all
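# Hedged illustration (synthetic numbers, defined only, never called): fits the
# same hyperbolic melt-vs-debris-thickness relation b = b0 / (1 + k * b0 * h)
# that plot_hd_vs_melt_comparison() fits to the modeled Ostrem curve above.
def _example_ostrem_fit():
    hd_m = np.array([0.05, 0.10, 0.20, 0.50, 1.00])      # debris thickness (m)
    melt_mmwed = np.array([45.0, 30.0, 18.0, 8.0, 4.0])  # melt (mm w.e. d-1), made up
    (b0, k), _ = curve_fit(melt_fromdebris_func, hd_m, melt_mmwed)
    return b0, k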
#%%
if option_melt_comparison:
# glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '10.01732', '11.00719', '11.02810', '11.02858', '11.03005',
# '12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733', '15.03743',
# '15.04045', '15.07886', '15.11758', '18.02397']
glaciers = ['1.15645', '2.14297', '7.01044', '11.00719', '11.02472', '11.02810', '11.02858', '11.03005',
'12.01012', '12.01132', '13.05000', '13.43165', '13.43232', '14.06794', '14.16042', '15.03733',
'15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '18.02375', '18.02397']
# glaciers = ['10.01732']
# glaciers = ['13.43165']
# glaciers = ['13.43232']
# glaciers = ['11.02858']
z_value = 1.645
hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all, reg_all = [], [], [], [], [], []
rgiid_all = []
# ===== KENNICOTT (1.15645) ====
if '1.15645' in glaciers:
print('\nmelt comparison with Anderson et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/1.15645_kennicott_anderson_2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Kennicott (1.15645)"
fig_fn = '1.15645_hd_melt_And2019.png'
# ds_names = ['Anderson 2019\n(6/18/11$\u2009$-$\u2009$8/16/11)']
ds_names = ['6/18/11$\u2009$-$\u2009$8/16/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6150N-21700E-debris_melt_curve.nc'
yearfracs_list = [[2011 + 169/365, 2011 + 228/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['01'])
rgiid_all.append(['1.15645'])
# ===== Emmons (2.14297) ====
if '2.14297' in glaciers:
print('\nmelt comparison with Moore et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/2.14297_moore2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Emmons (2.14297)"
fig_fn = '2.14297_hd_melt_Moo2019.png'
# ds_names = ['Moore 2019\n(7/31/14$\u2009$-$\u2009$8/10/14)']
ds_names = ['7/31/14$\u2009$-$\u2009$8/10/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-23825E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 212/365, 2014 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['02'])
rgiid_all.append(['2.14297'])
# ===== Svinafellsjokull (06.00474) ====
if '6.00474' in glaciers:
print('\nmelt comparison with Moller et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/6.00474_moller2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df['melt_mf'].values]
glac_name = "Svinafellsjokull (6.00474)"
fig_fn = '6.00474_hd_melt_Moller2016.png'
# ds_names = ['Moller 2016\n(5/17/13$\u2009$-$\u2009$5/30/13)']
ds_names = ['5/17/13$\u2009$-$\u2009$5/30/13']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6400N-34325E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 137/365, 2013 + 150/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 0.1) * 0.1,1) + 0.1
melt_tick_major, melt_tick_minor = 0.5, 0.1
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor,
plot_meltfactor=True)
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['06'])
rgiid_all.append(['6.00474'])
# ===== Larsbreen (7.01044) ====
if '7.01044' in glaciers:
print('\nmelt comparison with Nicholson and Benn 2006')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/7.01044_larsbreen_NB2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Larsbreen (7.01044)"
fig_fn = '7.01044_hd_melt_NichBenn2006.png'
# ds_names = ['Nicholson 2006\n(7/09/02$\u2009$-$\u2009$7/20/02)']
ds_names = ['7/09/02$\u2009$-$\u2009$7/20/02']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '7825N-1600E-debris_melt_curve.nc'
yearfracs_list = [[2002 + 191/366, 2002 + 202/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['07'])
rgiid_all.append(['7.01044'])
# ===== <NAME> (10.01732) ====
if '10.01732' in glaciers:
# print('\nmelt comparison with Mayer et al (2011)')
assert True == False, '10.01732 NEEDS TO DO THE MODELING FIRST!'
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/10.01732_mayer2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# glac_name = "<NAME> (10.01732)"
# fig_fn = '10.01732_hd_melt_Mayer2011.png'
## ds_names = ['Mayer 2011\n(7/11/07$\u2009$-$\u2009$7/30/07)']
# ds_names = ['7/11/07$\u2009$-$\u2009$7/30/07']
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '5000N-8775E-debris_melt_curve.nc'
# yearfracs_list = [[2007 + 192/365, 2007 + 211/365]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['10'])
# ===== Vernagtferner (11.00719) ====
if '11.00719' in glaciers:
print('\nmelt comparison with Juen et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.00719_vernagtferner_juen2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Vernagtferner (11.00719)"
fig_fn = '11.00719_hd_melt_Juen2013.png'
# ds_names = ['Juen 2013\n(6/25/10$\u2009$-$\u2009$7/10/10)']
ds_names = ['6/25/10$\u2009$-$\u2009$7/10/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-1075E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 176/365, 2010 + 191/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.00719'])
# ===== Vernocolo (11.02472) =====
if '11.02472' in glaciers:
print('\nmelt comparison with bocchiola et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02472_bocchiola2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Venerocolo (11.02472)"
fig_fn = '11.02472_hd_melt_Boc2015.png'
ds_names = ['8/10/07$\u2009$-$\u2009$9/13/07']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4625N-1050E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 222/365, 2007 + 256/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02472'])
# ===== Arolla (11.02810) ====
if '11.02810' in glaciers:
print('\nmelt comparison with Reid et al (2012)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02810_arolla_reid2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Arolla (11.02810)"
fig_fn = '11.02810_hd_melt_Reid2012.png'
# ds_names = ['Reid 2012\n(7/28/10$\u2009$-$\u2009$9/09/10)']
ds_names = ['7/28/10$\u2009$-$\u2009$9/09/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-750E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 209/365, 2010 + 252/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02810'])
# ===== Belvedere (11.02858) ====
if '11.02858' in glaciers:
print('\nmelt comparison with Nicholson and Benn (2006)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02858_belvedere_nb2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Belvedere (11.02858)"
fig_fn = '11.02858_hd_melt_NB2006.png'
# ds_names = ['Nicholson 2006\n(8/06/03$\u2009$-$\u2009$8/10/03)']
ds_names = ['8/06/03$\u2009$-$\u2009$8/10/03']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-800E-debris_melt_curve.nc'
yearfracs_list = [[2003 + 218/365, 2003 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02858'])
# ===== MIAGE (11.03005) ====
if '11.03005' in glaciers:
print('\nmelt comparison with Reid and Brock (2010)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.03005_reid2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Miage (11.03005)'
fig_fn = '11.03005_hd_melt_Reid2010.png'
# ds_names = ['Reid 2010\n(6/21/05$\u2009$-$\u2009$9/04/05)']
ds_names = ['6/21/05$\u2009$-$\u2009$9/04/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4575N-675E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 172/365, 2005 + 247/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.03005'])
# ===== Zopkhito (12.01012) ====
if '12.01012' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2008.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2009.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Zopkhito (12.01012)"
fig_fn = '12.01012_hd_melt_Lambrecht2011.png'
# ds_names = ['Lambrecht 2011\n(6/20/08$\u2009$-$\u2009$6/27/08)',
# 'Lambrecht 2011\n(7/01/09$\u2009$-$\u2009$7/08/09)']
ds_names = ['6/26/08$\u2009$-$\u2009$7/01/08',
'7/13/09$\u2009$-$\u2009$7/17/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4300N-4350E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 178/366, 2008 + 183/366], [2009 + 194/365, 2009 + 198/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01012'])
# ===== Djankuat (12.01132) ====
if '12.01132' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2007.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2008.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Djankuat (12.01132)"
fig_fn = '12.01132_hd_melt_Lambrecht2011.png'
ds_names = ['6/26/07$\u2009$-$\u2009$6/30/07','6/30/08$\u2009$-$\u2009$9/14/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4325N-4275E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 177/366, 2007 + 181/366], [2008 + 182/366, 2008 + 258/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01132'])
# ===== S Inylchek (13.05000) ====
if '13.05000' in glaciers:
print('\nmelt comparison with Hagg et al (2008)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.05000_hagg2008-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "S Inylchek (13.05000)"
fig_fn = '13.05000_hd_melt_Hagg2008.png'
# ds_names = ['Hagg 2008\n(7/30/05$\u2009$-$\u2009$8/10/05)']
ds_names = ['7/30/05$\u2009$-$\u2009$8/10/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4200N-8025E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 211/365, 2005 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.05000'])
# ===== No 72 =====
if '13.43165' in glaciers:
print('\nmelt comparison with Wang et al (2017)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43165_wang2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Qingbingtan (13.43165)"
fig_fn = '13.43165_hd_melt_wang2017.png'
ds_names = ['8/01/08$\u2009$-$\u2009$8/01/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 214/366, 2009 + 213/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43165'])
# ===== Koxkar (13.43232) ====
if '13.43232' in glaciers:
        print('\nmelt comparison with Han et al 2006 (measured via conduction plate, not ablation stake)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = | pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site1.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
data_series.py
Classes for manipulating the data series.
Created by <NAME> on 2016-09-22.
Copyright (c) 2016 <NAME>. All rights reserved.
"""
import pandas as pd
class NominalData(object):
"""Holds onto series and frames containing data in nominal units."""
def __init__(self, stocks_df, gs10_s):
"""Initialize the NominalPriceData object.
All frames and series should be indexed by a timestamp representing the start of the period for the row.
:param stocks_df: A frame of nominal stock price, dividend, and earnings data.
:param gs10_s: A series with the nominal yield in percent for 10-year treasury bonds.
"""
self.stocks_df = stocks_df
self.gs10_s = gs10_s / 100
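# Hedged usage sketch (names are illustrative, not from the original project): the stocks frame
# carries nominal price 'P', dividend 'D', and earnings 'E' columns, and GS10 is given in percent,
# e.g.
#     nominal = NominalData(stocks_df=monthly_stocks_df, gs10_s=gs10_percent_series)
#     nominal.gs10_s   # stored as a fraction (4.5 % -> 0.045)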
class RealStockData(object):
"""Convert stock data in nominal units to real data.
The resulting frame contains:
- price (real)
- dividend (real)
- earnings (real)
- m_return (real) (monthly return)
"""
def __init__(self, nominal_data, cpi_s, base_price_level):
"""
:param nominal_data: A nominal data object.
:param cpi_s: A series with CPI data, (pandas-)indexed the same as the nominal data.
:param base_price_level: The base price level representing 1.
"""
self.nominal_data = nominal_data
self.cpi_s = cpi_s
self.base_price_level = base_price_level
self.df = None
self._compute_df()
def _compute_df(self):
df = self._real_dollar_df()
df = self._enrich_with_real_monthly_return(df)
# Remove the nominal-unit columns
del df['P']
del df['D']
del df['E']
self.df = df
def _real_dollar_df(self):
"""Return a frame with real values for stock data in the fields price, dividend, and earnings.
Take the nominal-units data and use CPI data to convert to real units.
Return the original frame augmented with the real values.
"""
stocks_df = self.nominal_data.stocks_df
bpl = self.base_price_level
cpi = self.cpi_s
inflation_scale = bpl / cpi
price = pd.to_numeric(stocks_df['P']) * inflation_scale
dividend = pd.to_numeric(stocks_df['D']) * inflation_scale
earnings = | pd.to_numeric(stocks_df['E']) | pandas.to_numeric |
import re
import os
import pandas as pd
from textblob import TextBlob
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
class MovieReviewObject:
def __init__(self):
self.review_data = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 11:47:42 2018
@author: lefko
1sec~230 indexes
1index=0.004125 sec
every 1 index deviation from 230 points/sec = shifted axis values by 0.0002 sec
shock ttl on anIn3 channel of fp
"""
import csv
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import time
print(os.system('ls'))
def slash(operating_system):
if operating_system == 1:
streep = '//'
elif operating_system == 2:
streep = '/'
return streep
def split_list(n): #called only by get_sub_list function
"""takes a list of indices as input,subtracts the next(2) from the previous one(1),and returns the(2nd) index of the pairs that have diference>1"""
return [(x+1) for x,y in zip(n, n[1:]) if y-x != 1]
def get_sub_list(my_list):
"""will split the list based on the index.returns a list of lists with the individual trials(when shock was ON)in each row"""
my_index = split_list(my_list)
output = list()
prev = 0
for index in my_index:
new_list = [ x for x in my_list[prev:] if x < index]
output.append(new_list)
prev += len(new_list)
output.append([ x for x in my_list[prev:]])
return output
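# Hedged example of the two helpers above (toy indices, not recorded data): contiguous runs of
# shock-ON sample indices are split wherever the index jumps by more than 1.
def _example_get_sub_list():
    shock_on_idx = [10, 11, 12, 40, 41, 90]
    trials = get_sub_list(shock_on_idx)
    # trials == [[10, 11, 12], [40, 41], [90]]  -> one sub-list per contiguous shock-ON run
    return trials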
#function inputs: photosignal, indexes to timelock to, time before, time after (time before and after are given in samples)
def pertrial_array(signal, response, pre_slice,post_slice):
p=0
timeindex = np.arange((-pre_slice/230),(post_slice/230), 0.0043478261)
output = []
while p < len(response):
output.append([n for n in signal[(response[p])-pre_slice:(response[p])+post_slice]])
p += 1
output = pd.DataFrame(output, dtype='float64')
output = output.transpose().set_index(timeindex)
return output
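# Hedged example (synthetic trace, made-up onsets): mirrors the 3 s pre / 5 s post windows used
# below (~230 samples per second); returns one column per trial, indexed by time in seconds.
def _example_pertrial_array():
    demo_signal = np.random.randn(10000)          # stand-in for a dF/F0 trace
    demo_onsets = np.array([2300, 4600, 6900])    # hypothetical shock-onset sample indices
    return pertrial_array(demo_signal, demo_onsets, 690, 1150)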
#Creates a single average array from all the trials
def arrays_avg(dataframe):
dataframe['avg'] = dataframe.mean(axis=1, skipna = True)
return dataframe
#Obtains the first index in 'dataframe' which matches each number in 'timearray'
#creates a list of the indexes that match each timestamp from 'timearray' and returns the first one of each
def get_index(dataframe, timearray):
output = []
k =0
while k <len(timearray):
output.append(dataframe.index[(dataframe['Time(s)'] < timearray[k] + 0.01) & (dataframe['Time(s)'] > timearray[k]-0.01)].tolist())
k+= 1
return [x[0] for x in output]
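# Hedged illustration of get_index (timestamps are made up): for each requested timestamp it
# returns the first row index whose 'Time(s)' value lies within +/- 0.01 s of that timestamp.
def _example_get_index(df_photo):
    return get_index(df_photo, [12.5, 30.0])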
#creates the file directory for the results to be saved in. inputs: the data directory, in which the saving folder will be created, and an extension, e.g. 'analysis'
def make_path(directory, extension ):
date = time.strftime('%x'); new_date = date[6:8]; new_date = new_date + date[0:2] ; date = new_date + date[3:5]
saving_dir = directory + '/'+ str(extension)+'/'+str(date)
if os.path.exists(saving_dir):
numbers = [x for x in range(2,100)]
for i in range(len(numbers)):
if not os.path.exists(saving_dir + '_' + str(numbers[i])):
os.makedirs(saving_dir + '_' + str(numbers[i]))
saving_dir = (saving_dir + '_' + str(numbers[i]))
break
else:
os.makedirs(saving_dir)
return saving_dir
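# Hedged example (paths are illustrative): make_path('C:/fp_data', 'analysis') creates and returns a
# date-stamped folder such as 'C:/fp_data/analysis/181018', appending '_2', '_3', ... if it already exists.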
###########################################################################################################################################################3
#def info_page(filename, saving_dir):
# firstPage = plt.figure(figsize=(11.69,8.27))
# firstPage.clf()
# txt = filename
# firstPage.text(0.5,0.5,txt, transform=firstPage.transFigure, size=24, ha="center")
## pp.savefig()
# plt.savefig(saving_dir+'//'+'infoPage'+".png", bbox_inches='tight') ####change filename WD
# plt.close()
#
#
#calculates standard deviation (of the sample, NOT the population), count, SEM, and avg+-sem for the columns with shock data
def calc_SEM (df, shock_columns):
df['stdev']=df[shock_columns].std(axis=1)
df['count']=len(shock_columns)
df['SEM']=df['stdev']/(np.sqrt(df['count']))
df['avg+sem']=df['avg']+df['SEM']
df['avg-sem']=df['avg']-df['SEM']
return df
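# Hedged example (synthetic numbers, not recorded data): calc_SEM expects a frame that already
# holds an 'avg' column plus one column per trial listed in shock_columns.
def _example_calc_sem():
    demo = pd.DataFrame(np.random.randn(10, 3), columns=[0, 1, 2])
    demo['avg'] = demo[[0, 1, 2]].mean(axis=1)
    return calc_SEM(demo, shock_columns=[0, 1, 2])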
#creates trials timelocked to each respective event, and their average. takes dF/F0 files (one per event) as input
def make_trials(DFfile):
df_photo = pd.read_csv(DFfile, usecols=['Time(s)',"Analog In. | Ch.1 AIn-1 - Dem (AOut-1)_LowPass_dF/F0",'Analog In. | Ch.2 AIn-2 - Dem (AOut-2)_LowPass_dF/F0','Analog In. | Ch.3 AIn-3'] )
print('start processing'+DFfile[:-8]+'\n')
#checking decimation
    print('this file has ' + '%.2f' % (230/df_photo.loc[230,'Time(s)']) + ' samples per sec \n')
#renames columns
df_photo = df_photo.rename(columns={'Analog In. | Ch.1 AIn-1 - Dem (AOut-1)_LowPass_dF/F0':'isosb_df/f', 'Analog In. | Ch.2 AIn-2 - Dem (AOut-2)_LowPass_dF/F0': 'gcamp_df/f', 'Analog In. | Ch.3 AIn-3' : "shock"})
#rounds values (of ttl pulses) for better handling
df_photo.shock = df_photo.shock.round()
#selects all values (from all columns) during shock and puts them in new dataframe
shockON= df_photo.loc[df_photo['shock'] >= 1.5]
#has ALL the indices from main df_photo, during shocks(whole duration)
shockIndx = np.array(shockON.index.tolist(), dtype = int)
    #getting the indices of every first time the shock was on.
indx1stShocks = np.array([x[0] for x in get_sub_list(shockIndx)], dtype = int)
    indx1stShocks=indx1stShocks[:-1] #drop last value, since the pulse always turns on when medpc turns off, at the end of the measurement!
    #creating a dataframe with the values of all columns (t, df_photo ch1-3). t column is the timestamps for every shock onset. has the original indices from df_photo
FirstShockON=df_photo.loc[indx1stShocks]
ShockOnset_TS=FirstShockON['Time(s)'].to_frame().reset_index(drop=True) #making df of ts, ready to be sent to excel
#creates numpy arrayS of the photosignal of both ch from the dataframe
ch1 = np.array(df_photo['isosb_df/f'])
ch2 = np.array(df_photo['gcamp_df/f'])
#Defines a variable containing the size of the slice of signal you wish to obtain in 'samples' units
size_before= 690 #3sec
size_after= 1150 #5sec
    #creates the trials arrays, with photometric data around shock onset using pertrial_array, for each channel
photoShock1= pertrial_array(ch1, indx1stShocks, size_before, size_after)
photoShock2= pertrial_array(ch2, indx1stShocks, size_before, size_after)
photoShock_mc=photoShock2-photoShock1
shock_columns=list(photoShock1) #created a list with headers of shock columns. to be used later when df also has avg, sems etc
#averages the trials using session_avg
photoShock1 = arrays_avg(photoShock1)
photoShock2 = arrays_avg(photoShock2)
photoShock_mc=arrays_avg(photoShock_mc)
    print('trials made for '+str(DFfile[:-7])+ '\n' +'now calculating the SEMs \n')
photoShock1=calc_SEM(photoShock1, shock_columns)
photoShock2=calc_SEM(photoShock2, shock_columns)
photoShock_mc=calc_SEM(photoShock_mc, shock_columns)
return photoShock1, photoShock2, ShockOnset_TS, shock_columns, photoShock_mc
#will plot individual trials already contained in the dataframe. Takes as inputs the df, the event (e.g. onset - for labeling purposes) & the columns to be plotted (those containing trials)
def plot_indiv(df,col_to_plot,event, color1, saving_dir ):
print("creating trial plots")
graph_nr= len (col_to_plot)
cols = 2
# Calculates how many Rows are needed, for the given number of total graphs and columns
rows = graph_nr // cols
rows += graph_nr % cols
# Create a Position index, for placing every subplot
Position = range(1,graph_nr + 1)
fig = plt.figure(figsize=(5*cols, 4*rows))
fig.suptitle(str(event)+ "\n" ,fontsize=15, y=1) #aadds title over all subplots, y variable for title location
fig = plt.figure(1)
# add every single subplot to the figure with a for loop
for k in range(graph_nr):
ax = fig.add_subplot(rows,cols,Position[k])
#selecting lim of y axis from the min/max value from all columns that contain fight data
ax.set_ylim(df[col_to_plot].min().min(), df[col_to_plot].max().max()) #df.min() gives the min of each column. df.min().min() gives the total min
x=df.index
y=df[k]
ax.plot(x,y,color=color1, label='dF/F0', linewidth=1)
ax.set_title ('shock'+str(k))
plt.axvline(x=0, color='gray',linestyle='dashed', alpha=0.2)
plt.xlabel("time (s)")
plt.ylabel("dF/F0", labelpad=0)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.legend()
plt.tight_layout()
# plt.savefig(saving_dir+"\\"+str(event[5:])+".png", bbox_inches='tight') ####change filename WD
# pp.savefig()
plt.show()
#will plot the avg already contained in a dataframe. Inputs: dataframe, columns to be plotted (those containing trials), color, & str of event for labelling
#adds fig to pdf
def plot_avg(df,fights_col, event, color1, saving_dir):
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.tick_params(top=False ,right=False)
plt.ylabel("dF/F0"); plt.xlabel('Time (s)')
plt.title("average "+str(event)+"\n", fontsize=15)
#ax.set_ylim(df[fights_col].min().min(), df[fights_col].max().max())# sets the same y axis limits as the individual fights
x=df.index
y1=df['avg']
y2=df['avg-sem']
y3=df['avg+sem']
ax.plot(x,y1 , color=color1, label= 'avg', linewidth=1)
plt.axvline(x=0, color='gray',linestyle='dashed', alpha=0.2)
plt.fill_between(x,y2,y3, color =color1, alpha =0.25)
# plt.savefig(saving_dir + '\\' + "avg"+event[5:]+".png", bbox_inches='tight')
# pp.savefig()
plt.show()
def plot_gcampWisosb(df_gcamp,df_isosb,event, gcamp_color, isosb_color):
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.tick_params(top=False ,right=False)
plt.ylabel("dF/F0"); plt.xlabel('Time (s)')
plt.title("average "+str(event)+"\n", fontsize=15)
x=df_gcamp.index
y1=df_gcamp['avg']
y2=df_gcamp['avg+sem']
y3=df_gcamp['avg-sem']
y4=df_isosb['avg']
y5=df_isosb['avg+sem']
y6=df_isosb['avg-sem']
ax.plot(x,y1 ,color=gcamp_color, linewidth=1)
    ax.plot(x,y4,color=isosb_color, label='isosbestic', linewidth=1)
plt.axvline(x=0, color='gray',linestyle='dashed', alpha=0.2)
plt.fill_between(x,y2,y3, color=gcamp_color, alpha =0.25)
plt.fill_between(x,y5,y6, color=isosb_color, alpha=0.25)
plt.legend()
# plt.savefig(dir1 + '\\RESULTS\\' + "avg"+event[5:]+".png", bbox_inches='tight')--> to save every plot as one image
# pp.savefig()
plt.show()
def shock_analysis(filename,directory):
#creating pdf to save figures and adds first page with mouse info, date, experiment,all in filename
#
streep=slash(1)
photoShock1, photoShock2, shockTS, shock_col,photoShock_mc =make_trials(filename)
#plotting
plot_indiv(photoShock2, shock_col, "individual shocks_dLight",'#2ca02c', directory) #gcamp(off)-green
plot_avg(photoShock2, shock_col, " shock_dLight", '#2ca02c', directory)
plot_indiv(photoShock1, shock_col, "individual shocsk_isosb",'#1f77b4', directory) #isosb(off)-blue
plot_avg(photoShock1, shock_col, " shock_isosb", '#1f77b4', directory)
# plot_avg(photoShock_mc, shock_col,'shock motion corrected', "#e377c2")# plots motion corrected signal - BE CAUTIOUS
# plot_gcampWisosb(photoShock2,photoShock1, 'shock onset','#2ca02c','#1f77b4')
'''un-comment next block if you need to add plot with adjustable x axis range in plot'''
#xmin=230 #df starts from -3sec, i want it to start from -2sec, need to drop 1sec, (1*230) not sure if these numbers correct
#xmax=1150 #x max needed is 2sec, df starts from -3 sec, so need to drop everything after 5 sec (5*230=1150)
#plot_gcampWisosb(photoShock2.iloc[xmin:xmax],photoShock1[xmin:xmax], 'shock onset','#2ca02c','#1f77b4')
# pp.close() #to close the pdf where the plots are saved
print('sending data to excel \n')
# #Sends the Dataframes to Excel
# writer = pd.ExcelWriter(directory+ filename[:-6]+'_RESULT.xlsx')
# photoShock1.to_excel(writer,'shock_isosb', freeze_panes=(1,1) )
# photoShock2.to_excel(writer,'shock_dLight', freeze_panes=(1,1))
# photoShock_mc.to_excel(writer,'motion_corr', freeze_panes=(1,1))
# shockTS.to_excel(writer,'Shock timestamps(sec)', index_label='shock#')
# writer.save()
# photoshock_all=pd.concat({'isosb':photoShock1,'gcamp':photoShock2},axis=1) trying to have 1muliindex df with both gcamp &isosb data
    print('finished analyzing ' + str(filename[:-6]))
    return photoShock1, photoShock2
#changes directory to where the files to be read are - use double backslash *********************************************************************************************************************************
dir1 = './data/raw/'
#os.chdir (dir1)
#saving_dir=make_path(dir1, 'analysis')
saving_dir='./results/output'
#!1 for windows(returns//) and 2 for linux(returns/)
streep=slash(1)
batch_avg1= | pd.DataFrame() | pandas.DataFrame |
"""
The Colloid_output module contains classes to read LB Colloids simulation
outputs and perform post processing. Many classes are available to
provide plotting functionality. ModelPlot and CCModelPlot are useful for
visualizing colloid-surface forces and colloid-colloid forces respectively.
example import of the Colloid_output.py module is as follows
>>> from lb_colloids import ColloidOutput
>>> import matplotlib.pyplot as plt
>>>
>>> hdf = "mymodel.hdf5"
>>> mp = ColloidOutput.ModelPlot(hdf)
>>> # model plot accepts matplotlib args and kwargs!!!
>>> mp.plot('edl_x', cmap='viridis')
>>> plt.show()
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py as H
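# Hedged usage sketch for the class below (file name is hypothetical):
#
#     >>> bt = Breakthrough("mymodel.endpoint")
#     >>> bt.breakthrough_curve.plot()
#     >>> plt.show()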
class Breakthrough(object):
"""
Class to prepare and plot breakthrough curve data from endpoint
files.
Parameters:
----------
:param str filename: <>.endpoint file
Attributes:
----------
:ivar df: (pandas DataFrame): dataframe of endpoint data
:ivar resolution: (float): model resolution
:ivar timestep: (float): model timestep
:ivar continuous: (int): interval of continuous release, 0 means pulse
:ivar ncol: (float): number of colloids per release in simulation
:ivar total_ncol: (int): total number of colloids in simulation
"""
def __init__(self, filename):
if not filename.endswith('.endpoint'):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.df = reader.df
self.resolution = reader.resolution
self.timestep = reader.timestep
self.continuous = reader.continuous
# todo: replace this call with something from the header later!
self.ncol = reader.ncol
self.total_ncol = float(self.df.shape[0])
self.__breakthrough_curve = None
self.__reader = reader
@property
def breakthrough_curve(self):
"""
Property method that performs a dynamic
calculation of breakthrough curve data
"""
max_ts = self.df['nts'].max()
if self.__breakthrough_curve is None:
if not self.continuous:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
for index, row in bt_colloids.iterrows():
ncol += 1
ncols.append(float(ncol))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol')
self.__breakthrough_curve = df
else:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
ncol_per_release = []
for index, row in bt_colloids.iterrows():
lower_ts = row['end-ts'] - self.continuous
upper_ts = row['end-ts']
t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)]
ncol += 1
ncols.append(float(ncol))
ncol_per_release.append(len(t))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous)
& (bt_colloids['end-ts'] <= max_ts)]))
df = | pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
| Categorical(["a", "b"], ["a", "b", "b"]) | pandas.Categorical |
import subprocess
import os
import argparse
import pandas as pd
import numpy as np
from common.data import get_last_row
from common.feature import add_pct_diff_feature, add_volatility_feature
from stock.data import get_all_data
from stock.pred import predict
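# Hedged usage note (script name assumed): run as `python daily_run.py -k YOUR_ALPHA_VANTAGE_KEY`;
# without -k the script prints 'Must have api key' and exits.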
def main():
parser = argparse.ArgumentParser(description='daily run and reporting')
parser.add_argument("-k", "--key", help="Set the alpha advantage api key")
args = parser.parse_args()
    if args.key is None:
print('Must have api key')
return
get_all_data('data', args.key)
index = | pd.read_csv(f'data/index.csv') | pandas.read_csv |
from __future__ import division
import numpy as np
from sklearn.cross_validation import KFold, LabelShuffleSplit, LeavePLabelOut
from sklearn.linear_model import Ridge
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from mne.utils import _time_mask
from mne.connectivity import spectral_connectivity
from scipy.stats import pearsonr
import pandas as pd
from tqdm import tqdm
from copy import deepcopy
__all__ = ['EncodingModel',
'svd_clean',
'delay_timeseries',
'snr_epochs']
class EncodingModel(object):
def __init__(self, delays=None, est=None, scorer=None, preproc_y=True):
"""Fit a STRF model.
Fit a receptive field using time lags and a custom estimator or
pipeline. This implementation uses Ridge regression and scikit-learn.
It creates time lags for the input matrix, then does cross validation
to fit a STRF model.
Parameters
----------
delays : array, shape (n_delays,)
The delays to include when creating time lags. The input array X
will end up having shape (n_feats * n_delays, n_times)
est : list instance of sklearn estimator | pipeline with estimator
The estimator to use for fitting. This may be a pipeline, in which
case the final estimator must create a `coef_` attribute after
fitting. If an estimator is passed, it also must produce a `coef_`
attribute after fitting. If estimator is type `GridSearchCV`, then
a grid search will be performed on each CV iteration (using the cv
object stored in GridSearchCV). Extra attributes will be generated.
(see `fit` documentation)
scorer : function | None
The scorer to use when evaluating on the held-out test set.
It must accept two 1-d arrays as inputs (the true values first,
and predicted values second), and output a scalar value.
If None, it will be mean squared error.
preproc_y : bool
Whether to apply the preprocessing steps of the estimator used in
fitting on the predictor variables prior to model fitting.
References
----------
[1] Theunissen, <NAME>. et al. Estimating spatio-temporal receptive
fields of auditory and visual neurons from their responses to
natural stimuli. Network 12, 289–316 (2001).
[2] <NAME>. & <NAME>. Methods for first-order kernel estimation:
simple-cell receptive fields from responses to natural scenes.
Network 14, 553–77 (2003).
"""
self.delays = np.array([0]) if delays is None else delays
self.n_delays = len(self.delays)
self.est = Ridge() if est is None else est
self.scorer = mean_squared_error if scorer is None else scorer
self.preproc_y = preproc_y
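    # Hedged usage sketch (names and numbers are illustrative, not from the original project):
    #
    #     >>> delays = np.arange(0., 0.4, 1. / sfreq)        # 0-400 ms of time lags
    #     >>> model = EncodingModel(delays=delays, est=Ridge(alpha=1.))
    #     >>> model.fit(X, y, sfreq, tmin=0., tmax=2., cv=5)
    #     >>> model.coefs_.shape                             # (n_feats * n_delays,)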
def fit(self, X, y, sfreq, times=None, tmin=None, tmax=None, cv=None,
cv_params=None, feat_names=None, verbose=False):
"""Fit the model.
Fits a receptive field model. Model results are stored as attributes.
Parameters
----------
X : array, shape (n_epochs, n_feats, n_times)
The input data for the regression
y : array, shape (n_epochs, n_times,)
The output data for the regression
sfreq : float
The sampling frequency for the time dimension
times : array, shape (n_times,)
The times corresponding to the final axis of x/y. Is used to
specify subsets of time per trial (using tmin/tmax)
tmin : float | array, shape (n_epochs,)
The beginning time for each epoch. Optionally a different time
for each epoch may be provided.
tmax : float | array, shape (n_epochs,)
The end time for each epoch. Optionally a different time for each
epoch may be provided.
cv : int | instance of (KFold, LabelShuffleSplit)
The cross validation object to use for the outer loop
feat_names : list of strings/ints/floats, shape (n_feats,) : None
A list of values corresponding to input features. Useful for
keeping track of the coefficients in the model after time lagging.
verbose : bool
If True, will display a progress bar during fits for CVs remaining.
Attributes
----------
coef_ : array, shape (n_features * n_lags)
The average coefficients across CV splits
coefs_all_ : array, shape(n_cv, n_features * n_lags)
The raw coefficients for each iteration of cross-validation.
coef_names : array, shape (n_features * n_lags, 2)
A list of coefficient names, useful for keeping track of time lags
scores_ : array, shape (n_cv,)
Prediction scores for each cross-validation split on the held-out
test set. Scores are outputs of the `scorer` attribute function.
best_estimators_ : list of estimators, shape (n_cv,)
If initial estimator is type `GridSearchCV`, this is the list of
chosen estimators on each cv split.
best_params_ : list of dicts, shape (n_cv,)
If initial estimator is type `GridSearchCV`, this is the list of
chosen parameters on each cv split.
"""
if feat_names is not None:
if len(feat_names) != X.shape[1]:
raise ValueError(
                    'feat_names and X.shape[1] must be the same size')
if times is None:
times = np.arange(X.shape[-1]) / float(sfreq)
self.tmin = times[0] if tmin is None else tmin
self.tmax = times[-1] if tmax is None else tmax
self.times = times
self.sfreq = sfreq
# Delay X
X, y, labels, names = _build_design_matrix(X, y, sfreq, self.times,
self.delays, self.tmin,
self.tmax, feat_names)
self.feat_names = np.array(names)
cv = _check_cv(X, labels, cv, cv_params)
# Define names for input variabels to keep track of time delays
X_names = [(feat, delay)
for delay in self.delays for feat in self.feat_names]
self.coef_names = np.array(X_names)
# Build model instance
if not isinstance(self.est, Pipeline):
self.est = Pipeline([('est', self.est)])
# Create model metadata that we'll add to the obj later
model_data = dict(coefs_all_=[], scores_=[])
if isinstance(self.est.steps[-1][-1], GridSearchCV):
model_data.update(dict(best_estimators_=[], best_params_=[]))
# Fit the model and collect model results
if verbose is True:
cv = tqdm(cv)
for i, (tr, tt) in enumerate(cv):
X_tr = X[:, tr].T
X_tt = X[:, tt].T
y_tr = y[tr, np.newaxis]
y_tt = y[tt, np.newaxis]
if self.preproc_y:
y_tr, y_tt = [self.est._pre_transform(i)[0] for i in [y_tr, y_tt]]
self.est.fit(X_tr, y_tr)
mod = deepcopy(self.est.steps[-1][-1])
if isinstance(mod, GridSearchCV):
# If it's a GridSearch, then add a "best_params" object
# Assume hyperparameter search
if mod.refit:
model_data['best_estimators_'].append(mod.best_estimator_)
model_data['coefs_all_'].append(mod.best_estimator_.coef_)
model_data['best_params_'].append(mod.best_params_)
else:
model_data['coefs_all_'].append(mod.coef_)
# Fit model + make predictions
scr = self.scorer(y_tt, self.est.predict(X_tt))
model_data['scores_'].append(scr)
for key, val in model_data.iteritems():
setattr(self, key, np.array(val))
self.coefs_ = np.mean(self.coefs_all_, axis=0)
self.cv = cv
def predict(self, X):
"""Generate predictions using a fit receptive field model.
This uses the `coef_` attribute for predictions.
"""
X_lag = delay_timeseries(X, self.sfreq, self.delays)
Xt = self.est._pre_transform(X_lag.T)[0]
return np.dot(Xt, self.coefs_)
def coefs_as_series(self, agg=None):
"""Return the raw coefficients as a pandas series.
Parameters
----------
agg : None | function
If agg is None, all coefs across CVs will be returned. If it
is a function, it will be applied across CVs and the output
will be shape (n_coefficients,).
Outputs
-------
sr : pandas Series, shape (n_coefficients,) | (n_cv * n_coefficients)
The coefficients as a pandas series object.
"""
ix = pd.MultiIndex.from_tuples(self.coef_names, names=['feat', 'lag'])
if agg is None:
sr = []
for icv, icoef in enumerate(self.coefs_all_):
isr = | pd.DataFrame(icoef[:, np.newaxis], index=ix) | pandas.DataFrame |
import numpy as np
import pandas as pd
import requests
from sklearn.linear_model import LinearRegression
from datetime import date
from functools import lru_cache
# Note: pagination not implemented, so don't set lookbacks > 10,000 (>27 years)
class CMDataLoader:
@staticmethod
@lru_cache
def __get_network_data(metrics: list = ['HashRate', 'IssTotUSD', 'FeeTotUSD', 'PriceUSD', 'FeeMeanNtv'], end_time: str = date.today().strftime('%Y-%m-%d')):
api_str = f"https://community-api.coinmetrics.io/v4/timeseries/asset-metrics?assets=btc&metrics={','.join(metrics)}&end_time={end_time}&page_size=10000&pretty=true"
response = requests.get(api_str).json()
return pd.DataFrame(response['data'])
# Returns last lookback days of hash rate, from lookback_date
@staticmethod
@lru_cache
def get_historical_hash_rate(lookback: int = 30, lookback_date: str = date.today().strftime('%Y-%m-%d')):
return pd.to_numeric(CMDataLoader.__get_network_data(end_time = lookback_date).HashRate.tail(lookback)).reset_index(drop = True)
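    # Hedged usage sketch (requires network access to the Coin Metrics community API):
    #
    #     >>> hash_rate = CMDataLoader.get_historical_hash_rate(lookback=90)
    #     >>> revenue_usd = CMDataLoader.get_historical_miner_revenue_usd(lookback=90)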
# Returns last lookback days of USD miner revenue, from lookback_date
@staticmethod
@lru_cache
def get_historical_miner_revenue_usd(lookback: int = 30, lookback_date: str = date.today().strftime('%Y-%m-%d')):
values = CMDataLoader.__get_network_data(end_time = lookback_date)
return (pd.to_numeric(values.IssTotUSD) + | pd.to_numeric(values.FeeTotUSD) | pandas.to_numeric |