| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
import requests
import time
import multiprocessing
import pandas as pd
import numpy as np
from collections import OrderedDict
from factornado import get_logger
from examples import minimal, registry, tasks, periodic_task
import uuid
import pytest
multiprocessing.set_start_method('fork')
open('/tmp/test_examples.log', 'w').write('')
logger = get_logger(
file='/tmp/test_examples.log',
level=10,
levels={'requests': 30, 'tornado': 30, 'urllib3': 30}
)
@pytest.fixture(scope="session")
def server():
servers = OrderedDict([
('registry', registry),
('tasks', tasks),
('minimal', minimal),
('periodic_task', periodic_task),
])
tasks.app.mongo.tasks.delete_many({})
periodic_task.app.mongo.periodic.delete_many({})
periodic_task.app.mongo.periodic.insert_one({'dt': pd.Timestamp.utcnow(), 'nb': 0})
for i in range(30):
try:
time.sleep(2)
for key, val in servers.items():
logger.debug('Try HEARTBEAT on {} (try {})'.format(key, 1+i))
url = 'http://127.0.0.1:{}'.format(registry.app.get_port())
if val.app.config['name'] != 'registry':
url += '/{}'.format(val.app.config['name'])
r = requests.post(url + '/heartbeat')
r.raise_for_status()
assert r.text == 'ok'
logger.debug('Success HEARTBEAT on {} (try {})'.format(key, 1+i))
except Exception:
    # keep retrying until the servers answer; re-raise only on the last attempt
    if i == 29:
        raise
    continue
break
class s(object):
url = 'http://127.0.0.1:{port}'.format(port=registry.app.get_port())
yield s
class TestMinimal(object):
def test_minimal(self, server):
r = requests.get(server.url + '/minimal/hello')
r.raise_for_status()
assert r.text == 'Hello world\n'
def test_minimal_logs(self, server):
r = requests.get(server.url + '/minimal/log', params=dict(n=10000))
r.raise_for_status()
assert b"================" in r.content
class TestRegistry(object):
def test_registry_hello(self, server):
r = requests.get(server.url)
r.raise_for_status()
assert r.text == 'This is registry\n'
r = requests.get(server.url + '/')
r.raise_for_status()
assert r.text == 'This is registry\n'
def test_registry_register(self, server):
# Register a service named 'foo' at url 'foo_url'
r = requests.post(server.url + '/register/foo', data='{"url": "foo_url"}')
r.raise_for_status()
# Register a service named 'foo' at url 'foo_url_2'
r = requests.post(server.url + '/register/foo', data='{"url": "foo_url_2"}')
r.raise_for_status()
# Get urls for service 'foo'
r = requests.get(server.url + '/register/foo')
r.raise_for_status()
doc = r.json()
assert "foo" in doc
assert len(doc["foo"]) > 1
assert doc["foo"][0]["name"] == "foo"
assert doc["foo"][0]["info"] == {}
assert doc["foo"][0]["_id"] == "foo_url_2"
assert doc["foo"][1]["name"] == "foo"
assert doc["foo"][1]["info"] == {}
assert doc["foo"][1]["_id"] == "foo_url"
former_id = doc["foo"][0]['id']
for x in doc["foo"]:
assert x['id'] <= former_id
former_id = x['id']
# Get urls for all services
r = requests.get(server.url + '/register/all')
r.raise_for_status()
doc = r.json()
assert "foo" in doc
assert len(doc["foo"]) > 1
def test_registry_heartbeat(self, server):
# This query shall be proxied to 'minimal' through 'registry'
r = requests.post(server.url + '/heartbeat')
r.raise_for_status()
assert r.text == 'ok'
def test_registry_minimal(self, server):
# This query shall be proxied to 'minimal' through 'registry'
r = requests.get(server.url + '/minimal/hello')
r.raise_for_status()
assert r.text == 'Hello world\n'
class TestTasks(object):
def test_tasks_hello(self, server):
r = requests.get(server.url + '/tasks')
r.raise_for_status()
assert r.text == 'This is tasks\n'
r = requests.get(server.url + '/tasks/')
r.raise_for_status()
assert r.text == 'This is tasks\n'
def test_tasks_action_simple(self, server):
r = requests.put(server.url + '/tasks/action/task01/key01/stack', data={})
r.raise_for_status()
doc = r.json()
assert 'after' in doc
assert 'before' in doc
assert doc['after']['key'] == 'key01'
assert doc['after']['task'] == 'task01'
assert doc['after']['_id'] == 'task01/key01'
assert doc['after']['status'] in ['todo', 'toredo']
def test_tasks_action_priority(self, server):
r = requests.put(
server.url + '/tasks/action/task01/key01/stack',
data={},
params={'priority': 1})
r.raise_for_status()
doc = r.json()
assert 'after' in doc
assert 'before' in doc
assert doc['after']['key'] == 'key01'
assert doc['after']['task'] == 'task01'
assert doc['after']['_id'] == 'task01/key01'
assert doc['after']['status'] in ['todo', 'toredo']
assert doc['after']['priority'] == 1
def test_tasks_force_simple(self, server):
r = requests.put(server.url + '/tasks/force/task01/key01/fail', data={})
r.raise_for_status()
doc = r.json()
assert 'after' in doc
assert 'before' in doc
assert doc['after']['key'] == 'key01'
assert doc['after']['task'] == 'task01'
assert doc['after']['_id'] == 'task01/key01'
assert doc['after']['status'] == 'fail'
def test_tasks_force_priority(self, server):
r = requests.put(
server.url + '/tasks/force/task01/key01/toredo',
data={},
params={'priority': 1})
r.raise_for_status()
doc = r.json()
assert 'after' in doc
assert 'before' in doc
assert doc['after']['key'] == 'key01'
assert doc['after']['task'] == 'task01'
assert doc['after']['_id'] == 'task01/key01'
assert doc['after']['status'] == 'toredo'
assert doc['after']['priority'] == 1
def test_tasks_assignOne_simple(self, server):
while True:
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
if r.status_code != 200:
assert r.status_code == 204
break
r = requests.put(server.url + '/tasks/force/task01/key01/todo', data={})
r.raise_for_status()
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
assert r.status_code == 200
doc = r.json()
assert doc['key'] == 'key01'
assert doc['task'] == 'task01'
assert doc['status'] == 'todo'
def test_tasks_assignOne_double(self, server):
while True:
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
if r.status_code != 200:
assert r.status_code == 204
break
r = requests.put(server.url + '/tasks/force/task01/key01/todo', data={})
r.raise_for_status()
r = requests.put(server.url + '/tasks/force/task01/key02/todo', data={})
r.raise_for_status()
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
assert r.status_code == 200
doc = r.json()
assert doc['key'] == 'key01'
assert doc['task'] == 'task01'
assert doc['status'] == 'todo'
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
assert r.status_code == 200
doc = r.json()
assert doc['key'] == 'key02'
assert doc['task'] == 'task01'
assert doc['status'] == 'todo'
r = requests.put(server.url + '/tasks/assignOne/task01', data={})
r.raise_for_status()
assert r.status_code == 204
def test_get_by_key(self, server):
r = requests.put(server.url + '/tasks/force/task01/key01/todo', data={})
r.raise_for_status()
r = requests.get(server.url + '/tasks/getByKey/task01/key01')
r.raise_for_status()
doc = r.json()
assert doc['task'] == 'task01'
assert doc['key'] == 'key01'
assert doc['status'] == 'todo'
def test_get_by_status(self, server):
r = requests.put(server.url + '/tasks/force/task01/key01/todo', data={})
r.raise_for_status()
r = requests.put(server.url + '/tasks/force/task01/key02/done', data={})
r.raise_for_status()
r = requests.put(server.url + '/tasks/force/task01/key03/fail', data={})
r.raise_for_status()
r = requests.get(server.url + '/tasks/getByStatus/task01/todo%2Cdone%2Cfail', data={})
r.raise_for_status()
doc = r.json()
assert 'done' in doc
assert 'fail' in doc
assert 'todo' in doc
assert 'task01/key01' in [x['_id'] for x in doc['todo']]
assert 'task01/key02' in [x['_id'] for x in doc['done']]
assert 'task01/key03' in [x['_id'] for x in doc['fail']]
def test_tasks_multithreading(self, server):
def log_function(thread_id, r, operation):
r.raise_for_status()
open('mylog.log', 'a').write(' '.join([
pd.Timestamp.utcnow().isoformat(),
thread_id,
operation,
str(r.status_code),
str(pd.Timedelta(r.elapsed))[-15:],
]) + '\n')
def process_test(server, n=50):
thread_id = uuid.uuid4().hex[:8]
for i in range(n):
r = requests.put(server + '/action/someTask/someKey/stack')
log_function(thread_id, r, 'stack')
r = requests.put(server + '/assignOne/someTask')
log_function(thread_id, r, 'assignOne')
if r.status_code == 200:
r = requests.put(server + '/action/someTask/someKey/success')
log_function(thread_id, r, 'success')
# We launch 10 clients that will ask for tasks in the same time.
open('mylog.log', 'w').write('')
for i in range(10):
multiprocessing.Process(target=process_test,
args=(server.url + '/tasks',),
).start()
# We wait for the clients to finish their job.
for i in range(60):
data = list(map(lambda x: x.strip().split(), open('mylog.log').readlines()))
data = pd.DataFrame(
data,
columns=['dt', 'thread', 'action', 'code', 'duration'])
data['dt'] = data['dt'].apply(pd.Timestamp)
summary = data.groupby([
'thread', 'action', 'code']).apply(len).unstack(0).T.fillna(0).astype(int)
time.sleep(1)
if 'stack' in summary and summary['stack', '200'].max() == 50:
break
# Up to there, the task mechanism has run without failures.
assert ('stack' in summary and
summary['stack', '200'].max() == 50), 'No thread ended his job'
# Let's test if no task has been assigned twice in the same time.
z = data[data.action.isin(['assignOne', 'success']) & (data.code == '200')].set_index('dt')
z.sort_index(inplace=True)
z['nbDoing'] = (z.action == 'assignOne').cumsum() - (z.action == 'success').cumsum()
z['dt'] = (np.diff(z.index.values).astype(int) * 1e-9).tolist() + [None]
# We check that no task was assigned twice for more than 0.1 sec.
assert (z[z.nbDoing > 1]['dt'] < 0.1).all()
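# Worked illustration of the nbDoing bookkeeping above: for the event sequence
#   assignOne, assignOne, success, success
# the running difference is 1, 2, 1, 0; any value above 1 means two assignments were
# outstanding at once, which the assert tolerates only for gaps shorter than 0.1 s.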
class TestPeriodicTask(object):
def test_periodic_task(self, server):
# We call '/latest' in a loop till at least 3 documents have been created.
timeout = pd.Timestamp.utcnow() + pd.Timedelta(60, 's')
while True:
r = requests.get(server.url + '/periodictask/latest')
r.raise_for_status()
if r.text != 'null':
doc = r.json()
if doc['nb'] > 3:
break
elif | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
# coding: utf-8
# # Experiment PAMAP with mcfly
# ## Import required Python modules
# In[1]:
import sys
import os
import numpy as np
import pandas as pd
# mcfly
from mcfly import modelgen, find_architecture, storage
from keras.models import load_model
np.random.seed(2)
# In[2]:
sys.path.insert(0, os.path.abspath('../..'))
from utils import tutorial_pamap2
# Load the preprocessed data as stored in NumPy files. Please note that the data has already been split up into training (train), validation (val), and test subsets. It is common practice to call the input data X and the labels y.
# In[3]:
data_path = '/media/sf_VBox_Shared/timeseries/PAMAP_Dataset/cleaned_7act/'
# In[4]:
X_train, y_train_binary, X_val, y_val_binary, X_test, y_test_binary, labels = tutorial_pamap2.load_data(data_path)
# In[5]:
print('x shape:', X_train.shape)
print('y shape:', y_train_binary.shape)
# The data is split between train, test, and validation sets.
# In[6]:
print('train set size:', X_train.shape[0])
print('validation set size:', X_val.shape[0])
print('test set size:', X_test.shape[0])
# Let's have a look at the distribution of the labels:
# In[7]:
frequencies = y_train_binary.mean(axis=0)
frequencies_df = | pd.DataFrame(frequencies, index=labels, columns=['frequency']) | pandas.DataFrame |
import math
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn import svm
def get_past_midfielders():
data = pd.read_csv('../resources/merged.csv', sep=',', encoding='utf-8', index_col=0)
model = data[['player_id', 'name', 'season', 'pos', 'round', 'team_rank', 'opponent_team_rank', 'team_pot', 'opp_pot',
'concede_pot', 'opp_concede_pot', 'prev_points', 'form_points', 'total_points',
'long_form', 'ict_form']]
MidfielderModal = model.loc[model['pos'] == 'Defender'].copy()  # note: selects rows where pos == 'Defender'
MidfielderModal.drop('pos', axis=1, inplace=True)
MidfielderModal.sort_values(['season', 'round'], ascending=True, inplace=True)
MidfielderModal.to_csv('../resources/predictions/MIDFIELDERS.csv', sep=',', encoding='utf-8')
players = MidfielderModal[8587:]
keys = MidfielderModal['round']
values = pd.cut(MidfielderModal['round'], 3, labels=[1, 2, 3])
dictionary = dict(zip(keys, values))
MidfielderModal['round'] = values
X = MidfielderModal.drop(['total_points', 'season', 'player_id', 'name'], axis=1)
y = MidfielderModal[['total_points']]
X_train = X[:8586]
X_test = X[8587:]
y_train = y[:8586]
y_test = y[8587:]
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
score = regression_model.score(X_test, y_test)
y_pred = regression_model.predict(X_test)
testing = pd.concat([X_test, y_test], axis=1)
testing['Predicted'] = np.round(y_pred, 1)
testing['Prediction_Error'] = testing['total_points'] - testing['Predicted']
testing['player_id'] = 0
testing['name'] = 0
testing['player_id'] = players.player_id
testing['name'] = players.name
testing['round'] = 34
testing.to_csv('../resources/past/34_MIDS.csv', sep=',', encoding='utf-8')
# get_past_midfielders()
def merge():
one = pd.read_csv('../resources/predictions/30FOR.csv', sep=',', encoding='utf-8', index_col=0)
two = pd.read_csv('../resources/predictions/31FOR.csv', sep=',', encoding='utf-8', index_col=0)
three = pd.read_csv('../resources/predictions/32FOR.csv', sep=',', encoding='utf-8', index_col=0)
four = | pd.read_csv('../resources/predictions/33FOR.csv', sep=',', encoding='utf-8', index_col=0) | pandas.read_csv |
'''
This module contains all functions relating to time series data
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import yfinance as yf
from plotly import graph_objs as go
import seaborn as sns
from IPython.display import display, Image
from .structdata import get_cat_feats, get_num_feats, get_date_cols
def extract_dates(data=None, date_cols=None, subset=None, drop=True):
'''
Extracts date information in a dataframe and append to the original data as new columns.
For extracting only time features, use datasist.timeseries.extract_time function
Parameters:
-----------
data: DataFrame or named Series
The data set to extract date information from.
date_cols: List, Array
Name of date columns/features in data set.
subset: List, Array
Date features to return. One of:
['dow' ==> day of the week
'doy' ==> day of the year
'dom' ==> day of the month
'hr' ==> hour
'min', ==> minute
'is_wkd' ==> is weekend?
'yr' ==> year
'qtr' ==> quarter
'mth' ==> month ]
drop: bool, Default True
Drops the original date columns from the data set.
Return:
-------
DataFrame or Series.
'''
df = data.copy()
for date_col in date_cols:
#Convert date feature to Pandas DateTime
df[date_col ]= | pd.to_datetime(df[date_col]) | pandas.to_datetime |
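# A minimal usage sketch for extract_dates above, assuming the full datasist implementation;
# the frame and column names below are illustrative:
sample = pd.DataFrame({
    'order_date': pd.to_datetime(['2021-01-04', '2021-02-15', '2021-03-01']),
    'amount': [120.0, 75.5, 99.9],
})
# Keep only day-of-week, month and year features; the raw date columns are dropped by default.
sample_feats = extract_dates(sample, date_cols=['order_date'], subset=['dow', 'mth', 'yr'])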
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 12 2016
@author: Hector
Class to load and store configurations
read from run, trajectory or validation files
"""
import os
import numpy as _np
import pandas as _pd
import natsort as _ns
import glob as _glob
def _hyp_split(x, listing):
param_value = x.strip().rstrip('"').replace("'", "").split('=')
pname = param_value[0].replace("__", "")
if pname not in listing:
listing.append(pname)
return param_value[1]
def _validate_config_columns(x, past_names):
split_str = x.split(':')
name_l0 = split_str[0]
name_l1 = split_str[-1]
if name_l1 in past_names and name_l1 != 'choice':
name_l1 = name_l1 + '_' + split_str[1]
past_names.append(name_l1)
return name_l0, name_l1
def _validate_choice_names(x):
split_str = x.split(':')
name = split_str[-1]
if name == 'choice':
return split_str[0]
elif(split_str[0] == 'regressor' or
split_str[0] == 'classifier' or split_str[0] == 'preprocessor'):
return name + '_' + split_str[1]
else:
return name
DEBUG = False
class ConfigReader:
def __init__(self, data_dir=None, dataset=None):
self.runs_df = None
self.bests_df = None
self.trajectories_df = None
self.dataset = dataset
self.data_dir = data_dir
self.full_config = False
def load_run_configs(self, data_dir=None, dataset=None,
preprocessor='no_preprocessing', full_config=False):
"""
Loads all configurations run by SMAC, with validation error response
:param data_dir: Directory of where SMAC files live
:param dataset: In this case, the dataset used to train the model
:param preprocessor: Preprocessing method used in the data. None means all
:param full_config: Whether to return also the configuration of the preprocessor,
imputation and one-hot-encoding
:return: pandas.DataFrame with every performance value (training error) and the feed-forward neural network
configurations run by SMAC
"""
if data_dir is None and self.data_dir is None:
raise ValueError('Location of information not given')
elif self.data_dir is not None:
data_dir = self.data_dir
if dataset is None:
if self.dataset is None:
raise ValueError('Dataset not given')
else:
dataset = self.dataset
run_filename = "runs_and_results-SHUTDOWN*"
state_seed = "state-run*"
if preprocessor == 'all':
scenario_dir = os.path.join(data_dir, dataset, '*', dataset, state_seed, run_filename)
elif preprocessor is not None:
scenario_dir = os.path.join(data_dir, dataset, preprocessor, dataset, state_seed, run_filename)
else:
scenario_dir = os.path.join(data_dir, dataset, state_seed, run_filename)
dirs = _ns.natsorted(_glob.glob(scenario_dir))
if len(dirs) == 0:
raise ValueError('No runs_and_results files found.')
seeds_names = ['runs_' + itseeds.split('state-run')[-1].split('/')[0] for itseeds in dirs]
all_runs = []
all_best = []
runs_by_seed = []
for fnames in dirs:
try:
run_res, best_run = self.load_run_by_file(fnames, full_config=full_config)
all_runs.append(run_res)
all_best.append(best_run)
runs_by_seed.append(run_res.shape[0])
except IndexError:
print('CRASH in: ' + os.path.split(fnames)[1])
# Treat each seed as independent runs
runs_all_df = | _pd.concat(all_runs, axis=0) | pandas.concat |
import pandas as pd
import cachetools.func
import warnings
from . import states
@cachetools.func.ttl_cache(ttl=600)
def load_and_massage(url):
df = pd.read_csv(url)
df = df.drop(columns=['Lat', 'Long'])
df = df.rename(columns={'Province/State' : 'province', 'Country/Region' : 'country'})
df = df.drop(columns=['province']).groupby('country').sum()
df = df.T
df.index = pd.to_datetime(df.index)
return df
@cachetools.func.ttl_cache(ttl=600)
def load_countries():
sources = {
'confirmed' : 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',
'death' : 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
}
# Load each data file into a dataframe with row index = date, and column index = (country, province)
d = {key: load_and_massage(url) for key, url in sources.items()}
# Concatenate data frames: column index is now (variable, country)
df = pd.concat(d.values(), axis=1, keys=d.keys())
# Permute order of index to (country, province, variable) and sort the columns by the index value
df = df.reorder_levels([1,0], axis=1).sort_index(axis=1)
return df
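# A small sketch of how the (country, variable) column MultiIndex returned by
# load_countries() can be sliced; kept as a comment so the module does not hit the
# network on import, and country names follow the JHU source data:
#
#   covid = load_countries()
#   us_confirmed = covid[('US', 'confirmed')]   # cumulative confirmed cases, daily DatetimeIndex
#   us_deaths = covid[('US', 'death')]          # cumulative deaths, daily DatetimeIndex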
def filter_counties(df):
'''Filter to valid counties'''
# Subset to locations:
# (1) in US,
# (2) with county name
df = df.loc[(df['iso2']=='US') & (df['Admin2']) & (df['FIPS'])].copy()
return df
def get_place_info():
'''Get combined metadata data frame for countries, US states, US counties'''
country_info = get_country_info()
state_info = get_state_info()
county_info = get_county_info()
return pd.concat([country_info, state_info, county_info], sort=False)
def get_country_info():
'''Get country info from JHU location lookup file'''
url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv'
df = pd.read_csv(url, dtype={'FIPS': object})
df = df.loc[ | pd.isnull(df['Province_State']) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 20:05:29 2019
@author: badat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
pwd = os.getcwd()
sys.path.insert(0,pwd)
print('-'*30)
print(os.getcwd())
print('-'*30)
import pandas as pd
import os.path
import numpy as np
import time
from nets import vgg
from D_utility import evaluate,Logger,LearningRate,get_compress_type
from global_setting_MSCOCO import NFS_path,train_img_path,test_img_path,n_report,n_cycles
import pdb
import pickle
from tensorflow.contrib import slim
import tensorflow as tf
#%% data flag
idx_GPU=7
is_save = False
os.environ["CUDA_VISIBLE_DEVICES"]="{}".format(idx_GPU)
name='e2e_baseline_logistic_MSCOCO'
save_path = NFS_path+'results/'+name
learning_rate_base = 0.001
batch_size = 1
n_cycles *= 32
#%%
description = ''
description += 'learning_rate_base {} \n'.format(learning_rate_base)
description += 'batch_size {} \n'.format(batch_size)
description += 'signal_strength {} \n'.format(-1)
description += 'n_cycles {} \n'.format(n_cycles)
description += 'vgg'
#%%
checkpoints_dir = './model/vgg_ImageNet/vgg_16.ckpt'
is_train = tf.Variable(True,trainable=False,name='is_train')
#%%
print('number of cycles {}'.format(n_cycles))
#%% Dataset
image_size = vgg.vgg_16.default_image_size
height = image_size
width = image_size
def parser(record):
feature = {'img_id': tf.FixedLenFeature([], tf.string),
'img': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.string)}
parsed = tf.parse_single_example(record, feature)
img_id = parsed['img_id']
img = tf.reshape(tf.decode_raw( parsed['img'],tf.float32),[height, width, 3])
label = tf.clip_by_value(tf.decode_raw( parsed['label'],tf.int32),-1,1)
return img_id,img,label
def parser_test(record):
feature = {'img_id': tf.FixedLenFeature([], tf.string),
'img': tf.FixedLenFeature([], tf.string),
'label_1k': tf.FixedLenFeature([], tf.string)}
parsed = tf.parse_single_example(record, feature)
img_id = parsed['img_id']
img = tf.reshape(tf.decode_raw( parsed['img'],tf.float32),[height, width, 3])
label = tf.clip_by_value(tf.decode_raw( parsed['label_1k'],tf.int32),-1,1)
return img_id,img,label
#%%
def load_1k_name():
path = './data/MSCOCO_1k/vocab_coco.pkl'
with open(path,'rb') as f:
vocab = pickle.load(f)
return vocab['words']
classes = load_1k_name()
n_classes = len(classes)
#%% load in memory
sess = tf.InteractiveSession()#tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
g = tf.get_default_graph()
#%%
Theta = tf.get_variable('Theta',shape=[4096+1,n_classes])
#%%
fraction_idx_var = tf.get_variable('fraction_idx_var',shape=(),dtype = tf.int32,trainable = False)
#%%
dataset_tr = tf.data.TFRecordDataset(train_img_path,compression_type=get_compress_type(train_img_path))
dataset_tr = dataset_tr.map(parser)
#dataset_tr = dataset_tr.shuffle(20000)
dataset_tr = dataset_tr.batch(batch_size)
dataset_tr = dataset_tr.repeat()
iterator_tr = dataset_tr.make_initializable_iterator()
(img_ids_tr,img_tr,labels_tr) = iterator_tr.get_next()
dataset_tst = tf.data.TFRecordDataset(test_img_path,compression_type=get_compress_type(test_img_path))
dataset_tst = dataset_tst.map(parser_test).batch(100)
iterator_tst = dataset_tst.make_initializable_iterator()
(img_ids_tst,img_tst,labels_tst) = iterator_tst.get_next()
#%% ResNet
img_input_ph = tf.placeholder(dtype=tf.float32,shape=[None,height,width,3])
with slim.arg_scope(vgg.vgg_arg_scope()):
logit, end_points = vgg.vgg_16(img_input_ph, num_classes=1000, is_training=is_train)
features_concat = end_points['vgg_16/fc7']#g.get_tensor_by_name('resnet_v1_101/pool5:0')
features_concat = features_concat[:,0,0,:]#tf.squeeze(features_concat)
features_concat = tf.concat([features_concat,tf.ones([tf.shape(features_concat)[0],1])],axis = 1,name='feature_input_point')
labels_ph = tf.placeholder(dtype=tf.float32, shape=(None,n_classes)) #Attributes[:,:,fraction_idx_var]
#%%
with tf.variable_scope("logistic"):
logits = tf.matmul(features_concat,Theta)
labels_binary = labels_ph#tf.div(labels_ph+1,2)
loss_logistic = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels_binary, logits=logits) #,weights=labels_weight
with tf.variable_scope("regularizer"):
loss_regularizer = tf.square(tf.norm(Theta[:-1,:]))
#%% shared operation
grad_logistic = tf.gradients(loss_logistic, Theta)
grad_regularizer = tf.gradients(loss_regularizer,Theta)
norm_grad_logistic = tf.norm(grad_logistic)
norm_grad_regularizer = tf.norm(grad_regularizer)
norm_Theta = tf.norm(Theta)
raitio_regularizer_grad = norm_grad_logistic/norm_grad_regularizer
#%%
tf.global_variables_initializer().run()
sess.run(iterator_tr.initializer)
#%%
lr = LearningRate(learning_rate_base,sess)
optimizer = tf.train.RMSPropOptimizer(
lr.get_lr(),
0.9, # decay
0.9, # momentum
1.0 #rmsprop_epsilon
)
loss = loss_logistic
loss_regularizer = tf.losses.get_regularization_loss()
#loss += tf.losses.get_regularization_loss()
grad_vars = optimizer.compute_gradients(loss)
print('-'*30)
print('Decompose update ops')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train = optimizer.apply_gradients(grad_vars)
print('-'*30)
#%%
saver = tf.train.Saver()
init_var = tf.trainable_variables()
pdb.set_trace()
saver_init = tf.train.Saver(init_var)
df_result = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
#from sklearn import svm
from sklearn.naive_bayes import MultinomialNB
## Data preprocessing ##
## HR data load ##
hr_data = pd.read_csv("d:/data/WA_Fn-UseC_-HR-Employee-Attrition.csv") # shape 1470, 35
## yes/no -> 1/0 ##
## dummy variables: one-hot encode categorical features ##
hr_data['Attrition_ind'] = 0
hr_data.loc[hr_data['Attrition'] == 'Yes', 'Attrition_ind'] = 1
dummy_bus = pd.get_dummies(hr_data['BusinessTravel'], prefix='busns_trvl')
dummy_dpt = pd.get_dummies(hr_data['Department'], prefix='dept')
dummy_edufield = pd.get_dummies(hr_data['EducationField'], prefix='edufield')
dummy_gender = pd.get_dummies(hr_data['Gender'], prefix='gend')
dummy_jobrole = pd.get_dummies(hr_data['JobRole'], prefix='jobrole')
dummy_maritstat = pd.get_dummies(hr_data['MaritalStatus'], prefix='maritalstat')
dummy_overtime = pd.get_dummies(hr_data['OverTime'], prefix='overtime')
# cont columns #
continuous_columns = ['Age','DailyRate','DistanceFromHome','Education','EnvironmentSatisfaction',
'HourlyRate', 'JobInvolvement', 'JobLevel','JobSatisfaction','MonthlyIncome', 'MonthlyRate', 'NumCompaniesWorked',
'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction','StockOptionLevel', 'TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
'YearsWithCurrManager']
hr_continuous = hr_data[continuous_columns]
hr_data_new = pd.concat([dummy_bus, dummy_dpt, dummy_edufield, dummy_gender,
dummy_jobrole, dummy_maritstat, dummy_overtime, hr_continuous,
hr_data['Attrition_ind']], axis=1)
#hr_data_new.shape # shape (1470, 52)
## split ##
x_train, x_test, y_train, y_test = train_test_split(hr_data_new.drop(['Attrition_ind'], axis=1),
hr_data['Attrition_ind'], train_size=0.7,
test_size=0.3, random_state=77)
from sklearn.preprocessing import MinMaxScaler
ss= MinMaxScaler()
x_train = ss.fit_transform(x_train)
x_test = ss.transform(x_test)
x_train = pd.DataFrame(x_train, columns=hr_data_new.drop(['Attrition_ind'], axis=1).columns)
x_test = pd.DataFrame(x_test, columns=hr_data_new.drop(['Attrition_ind'], axis=1).columns)
### decision Tree ###
dt = DecisionTreeClassifier(criterion='gini', max_depth=5, min_samples_split=2,
min_samples_leaf=1, random_state=77)
dt.fit(x_train, y_train)
## confusion_matrix ##
print(pd.crosstab(y_train, dt.predict(x_train), rownames=['actual'], colnames=['predict']))
print(round(accuracy_score(y_train, dt.predict(x_train)), 4))
print(classification_report(y_train, dt.predict(x_train)))
print(pd.crosstab(y_test, dt.predict(x_test), rownames=['actual'], colnames=['predict']))
print(round(accuracy_score(y_test, dt.predict(x_test)), 4))
print(classification_report(y_test, dt.predict(x_test)))
### Class-weight tuning ###
dummy = np.empty((6,10))
dt_wttune = pd.DataFrame(dummy)
dt_wttune.columns = ["zero_wght", "one_wght", "tr_accuracy", "tst_accuracy", "prec_zero",
"prec_one", "prec_ovil", "recl_zero", "recl_one", "recl_ovil"]
zero_clwghts = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
print("=========tuning========")
for i in range(len(zero_clwghts)):
clwght = {0:zero_clwghts[i], 1:1.0-zero_clwghts[i]}
dt_fit = DecisionTreeClassifier(criterion='gini', max_depth=5, min_samples_split=2,
min_samples_leaf=1, random_state=77, class_weight=clwght)
dt_fit.fit(x_train, y_train)
dt_wttune.loc[i, 'zero_wght'] = clwght[0]
dt_wttune.loc[i, 'one_wght'] = clwght[1]
dt_wttune.loc[i, 'tr_accuracy'] = round(accuracy_score(y_train, dt_fit.predict(x_train)), 3)
dt_wttune.loc[i ,'tst_accuracy'] = round(accuracy_score(y_test, dt_fit.predict(x_test)), 3)
clf_sp = classification_report(y_test, dt_fit.predict(x_test)).split()
dt_wttune.loc[i, 'prec_zero'] = float(clf_sp[5])
dt_wttune.loc[i, 'prec_one'] = float(clf_sp[10])
dt_wttune.loc[i, 'prec_ovil'] = float(clf_sp[17])
dt_wttune.loc[i, 'recl_zero'] = float(clf_sp[6])
dt_wttune.loc[i, 'recl_one'] = float(clf_sp[11])
dt_wttune.loc[i, 'recl_ovil'] = float(clf_sp[18])
print(clwght)
print(round(accuracy_score(y_train, dt_fit.predict(x_train)), 3), "\t",
round(accuracy_score(y_test, dt_fit.predict(x_test)), 3))
print(pd.crosstab(y_test, dt_fit.predict(x_test), rownames=['actual'], colnames=['predict']))
print()
### Bagging ###
bag_fit = BaggingClassifier(base_estimator=dt_fit, n_estimators=5000, max_samples=0.67,
max_features=1.0, bootstrap=True, bootstrap_features=False,
n_jobs=-1, random_state=77)
bag_fit.fit(x_train, y_train)
y_hat = bag_fit.predict(x_train)
print( | pd.crosstab(y_train, y_hat, rownames=['actual'], colnames=['predict']) | pandas.crosstab |
# import sys
# sys.path.append('JEMIPYC')
# from array_check_function_global import df,dfn,dfv,dfx,dfnx,dfvx
import pandas as pd
import numpy as np
from IPython.display import display  # display() is called below; explicit import needed outside a notebook
tab = '__'
# non-extended version: the number of parameters is not limited; pass 2, 3, or as many as you want.
# ex) df(A,B,C,D,...,Z...)
# Of course, you can also pass just one parameter.
def df(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfn(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
tabn = '{'+str(i+1)+'}'
blank = pd.DataFrame(blank,columns=[tabn])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfv(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
xs = x[i]
row=len(x[0])
blank = ['']*row
if((i+1)!=leng):
# print(i)
vname = x[-1][i]
# print(vname)
tabv = "<("+str(vname)+")"
blank = pd.DataFrame(blank,columns=[tabv])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
# print(df_concat)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
# extension
def dfx(*x):
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfnx(*x):
pd.set_option('display.max_columns', None)
| pd.set_option('display.max_rows', None) | pandas.set_option |
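# A minimal usage sketch for the helpers above, assuming a Jupyter session where display()
# can render DataFrames; the arrays are illustrative:
A = np.arange(6).reshape(3, 2)
B = np.arange(9).reshape(3, 3)
df(A, B)    # shows A and B side by side, separated by a blank spacer column
dfn(A, B)   # same, but each block is followed by a numbered {1}, {2} marker column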
import xarray as xr
from pandas import to_datetime, date_range, DateOffset
import numpy as np
import os
def compress_netcfd(folder_path, start_date, out_folder, file_name, num_of_rivids):
"""
Takes the 52 individual ensembles and combines them into one compact NetCDF file, saving disk space in the process.
Parameters
----------
folder_path: str
The path to the folder containing the 52 ensemble forecast files in NetCDF format
start_date: str
The start date in YYYYMMDD format.
out_folder: str
The path to the folder that you want the more compact NetCDF file in.
file_name: str
The name of the region. For example, if the files followed the pattern of "Qout_africa_continental_1.nc,
this argument would be "Qout_africa_continental"
num_of_rivids: int
The number of streams that are contained in the region.
"""
# Based on 15 day forecast
forecast_day_indices = np.array([0, 8, 16, 24, 32, 40, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84], dtype=np.int8)
# Based on 10 day forecast
# Excluding the first day because we already have initialization from the normal forecasts
high_res_forecast_day_indices = np.array([24, 48, 72, 92, 100, 108, 112, 116, 120, 124])
start_datetime = to_datetime(start_date, infer_datetime_format=True)
dates = date_range(start_datetime + | DateOffset(1) | pandas.DateOffset |
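# A usage sketch for compress_netcfd above, assuming the full implementation; the paths,
# date, region name and rivid count are illustrative values following the docstring:
#
#   compress_netcfd(folder_path='/data/forecasts/20200101.0',
#                   start_date='20200101',
#                   out_folder='/data/forecasts/compressed',
#                   file_name='Qout_africa_continental',
#                   num_of_rivids=40000)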
"""
Predicts binding affinity for given descriptors using the specified trained model.
"""
import os
import pickle
import sys
from argparse import ArgumentParser, Namespace
from typing import Any
import pandas as pd
from pandas import DataFrame
def parse_args() -> Namespace:
"""
predict.py [-h] --model FILE --descriptors FILE --output FILE
"""
parser = ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
required.add_argument(
'--model', required=True, type=str, metavar='FILE',
help='Trained model to make predictions with. Must implement a predict(X) method like sklearn\'s predictors. '
'Must be saved in binary format as produced by pickle.dump().')
required.add_argument(
'--descriptors', required=True, type=str, metavar='FILE',
help='ECIF::LD descriptors to predict binding affinity for (CSV format). All columns except "Receptor" and '
'"Ligand" (both optional) are assumed to be part of the descriptors. Descriptors must be of the same length as '
'those used to train the model.')
required.add_argument(
'--output', required=True, type=str, metavar='FILE', help='Path to write results to')
return parser.parse_args()
def load_model(file: str) -> Any:
with open(file, 'rb') as f:
model = pickle.load(f)
# Verify that the model really has a predict method.
if not callable(getattr(model, 'predict', None)):
print_error_and_exit('Model has no predict method. Can\'t use it.')
return model
def load_descriptors(file: str) -> DataFrame:
return | pd.read_csv(file) | pandas.read_csv |
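# A minimal sketch of the prediction step this script performs, assuming a model trained on
# descriptors with the same columns (file names are illustrative):
#
#   model = load_model('trained_model.pkl')
#   descriptors = load_descriptors('ecif_ld_descriptors.csv')
#   X = descriptors.drop(columns=['Receptor', 'Ligand'], errors='ignore')
#   predictions = model.predict(X)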
# Gist example of IB wrapper from here: https://gist.github.com/robcarver17/f50aeebc2ecd084f818706d9f05c1eb4
#
# Download API from http://interactivebrokers.github.io/#
# (must be at least version 9.73)
#
# Install python API code /IBJts/source/pythonclient $ python3 setup.py install
#
# Note: The test cases, and the documentation refer to a python package called IBApi,
# but the actual package is called ibapi. Go figure.
#
# Get the latest version of the gateway:
# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix
# (for unix: windows and mac users please find your own version)
#
# Run the gateway
#
# user: edemo
# pwd: <PASSWORD>
#
# duration units and bar sizes:
# https://interactivebrokers.github.io/tws-api/historical_bars.html#hd_duration
# limitations:
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
import os
import time
import pprint
import queue
import datetime
import traceback
from pytz import timezone
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
from ibapi.wrapper import EWrapper
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from threading import Thread
DEFAULT_HISTORIC_DATA_ID = 50
DEFAULT_GET_CONTRACT_ID = 43
DEFAULT_GET_NP_ID = 42
DEFAULT_GET_EARLIEST_ID = 1
DEFAULT_HISTORIC_NEWS_ID = 1001
## marker for when queue is finished
FINISHED = object()
STARTED = object()
TIME_OUT = object()
class finishableQueue(object):
def __init__(self, queue_to_finish):
self._queue = queue_to_finish
self.status = STARTED
def get(self, timeout):
"""
Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue
:param timeout: how long to wait before giving up
:return: list of queue elements
"""
contents_of_queue = []
finished = False
while not finished:
try:
current_element = self._queue.get(timeout=timeout)
if current_element is FINISHED:
finished = True
self.status = FINISHED
else:
contents_of_queue.append(current_element)
## keep going and try and get more data
except queue.Empty:
## If we hit a time out it's most probable we're not getting a finished element any time soon
## give up and return what we have
finished = True
self.status = TIME_OUT
return contents_of_queue
def timed_out(self):
return self.status is TIME_OUT
class TestWrapper(EWrapper):
"""
The wrapper deals with the action coming back from the IB gateway or TWS instance
We override methods in EWrapper that will get called when this action happens, like currentTime
Extra methods are added as we need to store the results in this object
"""
def __init__(self):
self._my_contract_details = {}
self._my_historic_data_dict = {}
self._my_earliest_timestamp_dict = {}
self._my_np_dict = {}
self._my_hn_dict = {}
self._my_na_dict = {}
self._my_errors = queue.Queue()
## error handling code
def init_error(self):
error_queue = queue.Queue()
self._my_errors = error_queue
def get_error(self, timeout=5):
if self.is_error():
try:
return self._my_errors.get(timeout=timeout)
except queue.Empty:
return None
return None
def is_error(self):
an_error_if=not self._my_errors.empty()
return an_error_if
def error(self, id, errorCode, errorString):
## Overriden method
errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
self._my_errors.put(errormsg)
## get contract details code
def init_contractdetails(self, reqId):
self._my_contract_details[reqId] = queue.Queue()
return self._my_contract_details[reqId]
def contractDetails(self, reqId, contractDetails):
## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(contractDetails)
def contractDetailsEnd(self, reqId):
## overriden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(FINISHED)
def init_historicprices(self, tickerid):
self._my_historic_data_dict[tickerid] = queue.Queue()
return self._my_historic_data_dict[tickerid]
def init_earliest_timestamp(self, tickerid):
self._my_earliest_timestamp_dict[tickerid] = queue.Queue()
return self._my_earliest_timestamp_dict[tickerid]
def init_np(self, tickerid):
self._my_np_dict[tickerid] = queue.Queue()
return self._my_np_dict[tickerid]
def init_hn(self, requestId):
self._my_hn_dict[requestId] = queue.Queue()
return self._my_hn_dict[requestId]
def init_na(self, requestId):
self._my_na_dict[requestId] = queue.Queue()
return self._my_na_dict[requestId]
def historicalData(self, tickerid, bar):
## Overriden method
## Note I'm choosing to ignore barCount, WAP and hasGaps but you could use them if you like
# pprint.pprint(bar.__dict__)
bardata = (bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
historic_data_dict = self._my_historic_data_dict
## Add on to the current data
if tickerid not in historic_data_dict.keys():
self.init_historicprices(tickerid)
historic_data_dict[tickerid].put(bardata)
def headTimestamp(self, tickerid, headTimestamp:str):
## overridden method
if tickerid not in self._my_earliest_timestamp_dict.keys():
self.init_earliest_timestamp(tickerid)
self._my_earliest_timestamp_dict[tickerid].put(headTimestamp)
self._my_earliest_timestamp_dict[tickerid].put(FINISHED)
def newsProviders(self, newsProviders):
## overridden method
tickerid = DEFAULT_GET_NP_ID
if tickerid not in self._my_np_dict.keys():
self.init_np(tickerid)
self._my_np_dict[tickerid].put(newsProviders)
self._my_np_dict[tickerid].put(FINISHED)
def historicalDataEnd(self, tickerid, start:str, end:str):
## overriden method
if tickerid not in self._my_historic_data_dict.keys():
self.init_historicprices(tickerid)
self._my_historic_data_dict[tickerid].put(FINISHED)
def historicalNews(self, requestId, time, providerCode, articleId, headline):
newsdata = (time, providerCode, articleId, headline)
newsdict = self._my_hn_dict
if requestId not in newsdict.keys():
self.init_hn(requestId)
newsdict[requestId].put(newsdata)
def historicalNewsEnd(self, requestId, hasMore):
if requestId not in self._my_hn_dict.keys():
self.init_hn(requestId)
if hasMore:
print('more results available')
self._my_hn_dict[requestId].put(FINISHED)
def newsArticle(self, requestId, articleType, articleText):
if requestId not in self._my_na_dict.keys():
self.init_na(requestId)
self._my_na_dict[requestId].put((articleType, articleText))
self._my_na_dict[requestId].put(FINISHED)
class TestClient(EClient):
"""
The client method
We don't override native methods, but instead call them from our own wrappers
"""
def __init__(self, wrapper):
## Set up with a wrapper inside
EClient.__init__(self, wrapper)
def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):
"""
From a partially formed contract, returns a fully fledged version
:returns fully resolved IB contract
"""
## Make a place to store the data we're going to return
contract_details_queue = finishableQueue(self.init_contractdetails(reqId))
print("Getting full contract details from the server... ")
self.reqContractDetails(reqId, ibcontract)
## Run until we get a valid contract(s) or get bored waiting
MAX_WAIT_SECONDS = 3
new_contract_details = contract_details_queue.get(timeout = MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if contract_details_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
if len(new_contract_details)==0:
print("Failed to get additional contract details: returning unresolved contract")
return ibcontract, new_contract_details
if len(new_contract_details)>1:
print("got multiple contracts; using first one")
new_contract_details = new_contract_details[0]
resolved_ibcontract = new_contract_details.contract
return resolved_ibcontract, new_contract_details
def get_IB_historical_data(self,
ibcontract,
whatToShow="ADJUSTED_LAST",
durationStr="1 Y",
barSizeSetting="1 day",
tickerid=DEFAULT_HISTORIC_DATA_ID,
latest_date=None):
"""
Returns historical prices for a contract, up to latest_date
if latest_date is None, uses today's date
latest_date should be of form %Y%m%d %H:%M:%S %Z
ibcontract is a Contract
:returns list of tuples: (date, open, high, low, close, volume)
"""
# set latest_date to today and now if it is None
if latest_date is None:
latest_date = get_latest_date_local()
## Make a place to store the data we're going to return
historic_data_queue = finishableQueue(self.init_historicprices(tickerid))
# Request some historical data. Native method in EClient
self.reqHistoricalData(
tickerid, # tickerId,
ibcontract, # contract,
latest_date, # endDateTime,
durationStr, # durationStr,
barSizeSetting, # barSizeSetting,
whatToShow=whatToShow,
useRTH=1,
formatDate=1,
keepUpToDate=False, # <<==== added for api 9.73.2
chartOptions=[] ## chartOptions not used
)
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
while True:
print("Getting historical data from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
historic_data = historic_data_queue.get(timeout=MAX_WAIT_SECONDS)
er = ''
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'Not connected' in er:
print('sleeping 30s to wait for reconnection; suggest restarting TWS')
time.sleep(30)
if "HMDS query returned no data" in er:
print(historic_data)
print(historic_data is None)
if historic_data_queue.timed_out() and not er:
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
# only keep trying if not connected
if not 'Not connected' in er:
break
# TODO: this is cancelling query early maybe?
self.cancelHistoricalData(tickerid)
# convert to pandas dataframe
# date, open, high, low, close, vol
# already adjusted for splits
if len(historic_data) != 0:
df = pd.DataFrame.from_records(data=historic_data, index='datetime', columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])
df.index = pd.to_datetime(df.index)
if whatToShow not in ['TRADES', 'ADJUSTED_LAST']:
# volume only available for trades
df.drop('volume', axis=1, inplace=True)
return df
else:
return historic_data
def getEarliestTimestamp(self, contract, whatToShow='ADJUSTED_LAST', useRTH=1, formatDate=1, tickerid=DEFAULT_GET_EARLIEST_ID):
# parameters: https://interactivebrokers.github.io/tws-api/classIBApi_1_1EClient.html#a059b5072d1e8e8e96394e53366eb81f3
## Make a place to store the data we're going to return
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
tries = 0
while True:
tries += 1
earliest_timestamp_queue = finishableQueue(self.init_earliest_timestamp(tickerid))
self.reqHeadTimeStamp(tickerid, contract, whatToShow, useRTH, formatDate)
print("Getting earliest timestamp from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
earliest = earliest_timestamp_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'No head time stamp' in er:
return None
break
if earliest_timestamp_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
self.cancelHeadTimeStamp(tickerid)
if len(earliest) != 0:
    break
if tries == 20:
    return None
return earliest[0] # first element in list
def getNewsProviders(self):
"""
available news providers by default are
[140007057343600: BRFG, Briefing.com General Market Columns,
140007057342704: BRFUPDN, Briefing.com Analyst Actions,
140007057343544: DJNL, Dow Jones Newsletters]
"""
## Make a place to store the data we're going to return
tickerid = DEFAULT_GET_NP_ID
np_queue = finishableQueue(self.init_np(tickerid))
# Request news providers. Native method in EClient
self.reqNewsProviders()
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
print("Getting list of news providers from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
nps = np_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if np_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return nps[0] # list within a list
def getHistoricalNews(self, reqId, conId, providerCodes, startDateTime, endDateTime, totalResults):
hn_queue = finishableQueue(self.init_hn(reqId))
self.reqHistoricalNews(reqId, conId, providerCodes, startDateTime, endDateTime, totalResults, historicalNewsOptions=[])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 15
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
hn = hn_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if hn_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return hn
def getNewsArticle(self, reqId, providerCode, articleId):
na_queue = finishableQueue(self.init_na(reqId))
self.reqNewsArticle(reqId, providerCode, articleId, [])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
na = na_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if na_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return na
class TestApp(TestWrapper, TestClient):
def __init__(self, ipaddress, portid, clientid):
TestWrapper.__init__(self)
TestClient.__init__(self, wrapper=self)
self.connect(ipaddress, portid, clientid)
thread = Thread(target = self.run)
thread.start()
setattr(self, "_thread", thread)
self.init_error()
def get_hist_data_date_range(self,
ibcontract,
whatToShow='TRADES',
barSizeSetting='3 mins',
start_date=None,
end_date=None,
tickerid=DEFAULT_HISTORIC_DATA_ID):
"""
gets historic data for date range
if start_date is None, then first finds earliest date available,
and gets all data to there
if end_date is None, will get data to latest possible time
start_date and end_date should be strings in format YYYYMMDD
useful options for whatToShow for stocks can be:
ADJUSTED_LAST (adj for splits and dividends)
TRADES (only adjusted for splits)
BID
ASK
OPTION_IMPLIED_VOLATILITY
HISTORICAL_VOLATILITY
"""
# convert start_date string to datetime date object for comparisons
start_date_datetime_date = pd.to_datetime('1800-01-01').date() # early date so it doesn't match df.index.date below (if not updating data)
if start_date is not None:
# go one day past start date just to make sure we have all data
start_date_datetime_date = (pd.to_datetime(start_date) - pd.Timedelta('1D')).date()
smallbars = ['1 secs', '5 secs', '10 secs', '15 secs', '30 secs', '1 min']
max_step_sizes = {'1 secs': '1800 S', # 30 mins
'5 secs': '3600 S', # 1 hour
'10 secs': '14400 S', # 4 hours
'15 secs': '14400 S', # 4 hours
'30 secs': '28800 S', # 8 hours
'1 min': '1 D',
'2 mins': '2 D',
'3 mins': '1 W',
'5 mins': '1 W',
'10 mins': '1 W',
'15 mins': '1 W',
'20 mins': '1 W',
'30 mins': '1 M',
'1 hour': '1 M',
'2 hours': '1 M',
'3 hours': '1 M',
'4 hours': '1 M',
'8 hours': '1 M',
'1 day': '1 Y',
'1 week': '1 Y',
'1 month': '1 Y'}
# TODO: check if earliest timestamp is nothing or before/after end_date
earliest_timestamp = self.getEarliestTimestamp(ibcontract, whatToShow=whatToShow, tickerid=tickerid)
if earliest_timestamp is not None:
earliest_datestamp = earliest_timestamp[:8]
# if timeout, will return empty list
df = []
if end_date is None:
latest_date = None
else:
# TODO: need to adapt this to other than mountain time
latest_date = end_date + ' ' + get_close_hour_local() + ':00:00'
# list is returned if there is an error or something?
tries = 0
while type(df) is list:
tries += 1
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=latest_date)
if tries == 10:
print('tried to get historic data 10x and failed, returning None')
return None
earliest_date = df.index[0]
full_df = df
self.df = full_df
df_dates = df.index.date
# keep going until the same result is returned twice...not perfectly efficient but oh well
previous_earliest_date = None
i = 0
start_time = time.time()
is_list = 0
while previous_earliest_date != earliest_date:
i += 1
print(i)
print(previous_earliest_date)
print(earliest_date)
# TODO: if "HMDS query returned no data" in error lots of times, maybe finish it
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=earliest_date.strftime('%Y%m%d %H:%M:%S'))
if type(df) is list:
is_list += 1
# we've probably hit the earliest time we can get
if earliest_timestamp is not None:
if is_list >= 3 and earliest_date.date().strftime('%Y%m%d') == earliest_datestamp:
print("hit earliest timestamp")
break
if is_list >= 10:
print('hit 10 lists in a row')
break
df_dates = None
continue
else:
is_list = 0
previous_earliest_date = earliest_date
earliest_date = df.index[0]
full_df = | pd.concat([df, full_df]) | pandas.concat |
"""Class and methods to evaluate backtests, event studies and risk premiums
- Event studies, cumulative abnormal returns
- Risk premiums, Fama-MacBeth regressions
- Sharpe ratio, appraisal ratio, walk-forward backtests
Author: <NAME>
License: MIT
"""
import numpy as np
import scipy
import pandas as pd
import time
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
from patsy.builtins import Q
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
from matplotlib import dates as mdates
from sqlalchemy import Integer, String, Float, SmallInteger, Boolean, BigInteger
from sqlalchemy import Column, Index
from pandas.api import types
from .structured import Structured
from .display import plot_date, plot_bands
from .solve import least_squares
try:
from settings import ECHO
except:
ECHO = False
def _astype(v, t=str):
"""Convert each element in list of nested lists to target type"""
return [_astype(u, t) for u in v] if types.is_list_like(v) else t(v)
def _as_compound(rets, intervals, dates=None):
"""Compounds series of returns between (list of) date tuples (inclusive)"""
if len(intervals)==0:
return []
elif len(intervals)==1:
return [_as_compound(rets, intervals[0])]
elif len(intervals)==2 and isinstance(intervals[0], int):
d = rets.index if dates is None else dates
return np.prod(rets[(d >= intervals[0]) & (d <= intervals[1])] + 1) - 1
else:
return [_as_compound(rets, interval) for interval in intervals]
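# For instance (illustrative numbers, kept as a comment so nothing runs at import):
#   rets = Series([0.01, -0.02, 0.03], index=[20210104, 20210105, 20210106])
#   _as_compound(rets, (20210104, 20210106))   # (1.01 * 0.98 * 1.03) - 1 ~= 0.0195
# With a list of (beg, end) tuples it returns one compounded return per interval.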
class DailyPerformance:
"""Simple class for computing daily realized returns on periodic holdings"""
def __init__(self, stocks):
"""Initialize with Stocks or Stocks-like object"""
self.stocks = stocks
def __call__(self, holdings, end):
"""Return series of daily returns through end date
Parameters
----------
holdings : dict of Series keyed by int date
Series, indexed by permno, contain stock weights on rebalance date
end : int
Last date of daily returns to compute performance for
Returns
-------
performance : Series
Daily realized portfolio returns
"""
rebals = sorted(holdings.keys()) # rebalance dates, including initial
dates = self.stocks.bd.date_range(rebals[0], end)[1:] # return dates
curr = holdings[rebals[0]] # initial portfolio
perf = {} # collect daily performance
for date in dates[1:]: # loop over return dates
ret = self.stocks.get_ret(date, date)
perf[date] = sum(curr * ret['ret'].reindex(curr.index, fill_value=0))
if date in rebals: # update daily portfolio holdings
curr = holdings[date]
else:
curr = curr * (1 + ret['retx'].reindex(curr.index)).fillna(1)
return Series(perf, name='ret')
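# A sketch of intended use, assuming a Stocks-like object `crsp`; the permnos, weights and
# dates are illustrative (kept as a comment so nothing runs at import):
#
#   perf = DailyPerformance(crsp)
#   holdings = {20210129: Series({10107: 0.5, 14593: 0.5}),    # rebalance date -> weights
#               20210226: Series({10107: 0.4, 14593: 0.6})}
#   daily = perf(holdings, end=20210331)   # Series of daily realized portfolio returns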
class BackTest(Structured):
"""Base class for computing portfolio backtest returns
Parameters
----------
sql : SQL object
SQL database connection to store results
bench : benchmark Structured dataset instance
Where riskfree and benchmark returns can be retrieved from
rf : str
Column name of riskfree rate in benchmark dataset
max_date : int
Last date of any backtest (i.e. bench and stock returns available)
table : str, default is 'backtests'
name of table in user database to store results in
Notes
-----
assumes that monthly risk-free rates are also available through {bench},
with name suffixed by "(mo)". If backtest dates appear to be monthly
frequency, monthly risk-free rates will be retrieved
and used rather than compounded from daily (reduces precision errors).
Examples
-------
backtest = BackTest(user, bench, 'RF', 20200930)
"""
# schema of the table to store backtested performance returns
def __init__(self, sql, bench, rf, max_date, table='backtests', echo=ECHO):
"""Initialize class to evaluate backtest performance"""
tables = {'backtests':
sql.Table(table,
Column('permno', String(32), primary_key=True),
Column('begret', Integer),
Column('endret', Integer, primary_key=True),
Column('longs', Integer),
Column('shorts', Integer),
Column('buys', Float),
Column('sells', Float),
Column('long_weight', Float),
Column('short_weight', Float),
Column('excess', Float),
Column('ret', Float))}
super().__init__(sql, bench.bd, tables, 'permno', name='backtests')
self.echo_ = echo
self.bench = bench
self.max_date = max_date
self.rf = bench.get_series([rf], 'ret', end=max_date)[rf]
rf = bench.get_series([rf + "(mo)"], 'ret', end=max_date) # monthly
self.monthly_ = {(bench.bd.begmo(d), bench.bd.endmo(d)):
float(rf.loc[d]) for d in rf.index}
self.annualized = {} # collect annualized backtest statistics
self.perf = None # raw performance before attribution
self.excess = None # with excess returns after attribution
self.label = None # label name
def _print(self, *args, echo=None):
        if echo or self.echo:
print(*args)
def __call__(self, stocks, holdings, label, overlap=0):
"""Compute holding returns and rebalance statistics
Parameters
----------
stocks: Structured data set
Where securities' identifiers, returns and data can be accessed
holdings: dict {rebaldate : holdings Series}
Each Series is indexed by permno, with weights in column
Last item of dict (can be empty) is ignored for calculating returns
label: string
Label to set to name this backtest
overlap: int, default is 0
Number of months to smooth holdings
Returns
-------
perf : DataFrame
portfolio holdings returns after each rebalance date
        Notes
        -----
        If the stocks data are CRSP ('delist' returns available and holdings
        are monthly), delisting returns are applied to the performance.
"""
for d, h in holdings.items():
if not h.index.is_unique:
raise ValueError(f"duplicate holdings index date={d}")
pordates = sorted(list(holdings.keys()))
self._print(len(pordates), 'dates:', pordates[0], '-', pordates[-1])
perf = {} # accum performance each period
smooth = [] # to queue rolling holdings
prev = Series(dtype=float) # prior holdings, adjusted by returns
holding_periods = stocks.bd.holding_periods(pordates)
for pordate, (begret, endret) in zip(pordates[:-1], holding_periods):
if (begret, endret) in self.monthly_:
riskfree = self.monthly_[(begret, endret)]
else:
riskfree = _as_compound(self.rf, (begret, endret))
# insert current holdings into smooth
if len(smooth) > overlap: # smooth has list of recent holdings
smooth.pop()
smooth.insert(0, holdings[pordate].copy())
# compute rolling weights, by adding to superset of permnos in curr
permnos = sorted(set(np.ravel([list(p.index) for p in smooth])))
curr = Series(index=permnos, data=[0] * len(permnos), dtype=float)
for p in smooth: # assign weight in smoothed final
curr[p.index] += p / len(smooth)
# get stocks' returns
ret = stocks.get_ret(begret, endret, delist=True)
ret = ret['ret'].reindex(curr.index, fill_value=0)
r = sum(curr * ret) # portfolio return this month
# compute turnover
delta = pd.concat((prev, curr), axis=1, join='outer').fillna(0)
delta = delta.iloc[:, 1] - delta.iloc[:, 0] # change in holdings
# collect
perf.update({int(endret):
{'begret': int(begret),
'endret': int(endret),
'longs': sum(curr > 0),
'shorts': sum(curr < 0),
'long_weight': curr[curr > 0].sum(),
'short_weight': curr[curr < 0].sum(),
'ret': r,
'excess': r - (curr.sum() * riskfree),
'buys': delta[delta>0].abs().sum(),
'sells': delta[delta<0].abs().sum()}})
# adjust stock weights by retx till end of holding period
retx = stocks.get_ret(begret, endret, field='retx')['retx']
prev = curr * (1 + retx.reindex(curr.index)).fillna(1)
for i in range(len(smooth)):
smooth[i] *= (1 + retx.reindex(smooth[i].index)).fillna(1)
self._print(f"(backtest) {pordate} {len(curr)} {r:.4f}")
self.perf = DataFrame.from_dict(perf, orient='index')
self.label = label
self.excess = None
return perf
def drawdown(self):
"""Compute max drawdown and period: amount of loss from previous high"""
cumsum = self.perf['excess'].cumsum()
cummax = cumsum.cummax()
end = (cummax - cumsum).idxmax()
beg = cumsum[cumsum.index <= end].idxmax()
dd = cumsum.loc[[beg, end]]
return dd
def write(self, label):
"""Save backtest performance returns to database"""
self['backtests'].create(checkfirst=True)
delete = self['backtests']\
.delete().where(self['backtests'].c['permno'] == label)
self.sql.run(delete)
self.perf['permno'] = label
self.sql.load_dataframe(self['backtests'].key, self.perf)
def read(self, label=None):
"""Load backtest performance returns from database"""
if label is None:
q = (f"SELECT {self.identifier}, count(*) as count,"
f" min(begret) as begret, max(endret) as endret "
f" from {self['backtests'].key} group by {self.identifier}")
return self.sql.read_dataframe(q).set_index(self.identifier)
q = (f"SELECT * from {self['backtests'].key} "
f"where {self.identifier} = '{label}'")
self.perf = self.sql.read_dataframe(q)\
.sort_values(['endret'])\
.set_index('endret', drop=False)\
.drop(columns=['permno'])
self.label = label
self.excess = None
return self.perf
def get_series(self, field='ret', start=19000000, end=None):
"""Retrieve saved backtest as a series"""
        return self.sql.pivot(self['backtests'].key, index='endret',
                              columns='permno', values=field,
                              where=(f"endret >= {start} AND endret <= "
                                     f"{self.max_date if end is None else end} "
                                     f"AND permno = '{self.label}'"))\
                       .rename(columns={'endret': 'date'})
def fit(self, benchnames, beg=0, end=None, haclags=1):
"""Compute performance attribution against benchmarks
Parameters
----------
benchnames: list of str
Names of benchmark returns to compute attribution against
haclags: int, optional
Option for robustcov statistics = number of Newey-West lags
Returns
-------
DataFrame
Each row is excess returns performance following each rebalance date
Attributes
        ----------
annualized : dict of performance ratios
'excess': annualized excess (of portfolio weight*riskfree) return
'sharpe': annualized sharpe ratio
'jensen': annualized jensen alpha
'appraisal': annualized appraisal ratio
'welch-t': t-stat for structural break after 2002
'welch-p': p-value for structural break after 2002
'turnover': annualized total turnover rate
'buys': annualized buy rate
'sells': annualized sell rate
results : statsmodels OLS results
from fitting statsmodels OLS of excess on benchmark returns
"""
# collect performance between beg and end dates
end = end or self.max_date
#
# this can be simplified
#
d = self.perf.loc[beg:end].index
nyears = len(self.rf.loc[d[0]:d[-1]]) / 252
p = self.perf.loc[d, 'excess'].rename(self.label).to_frame()
# collect benchmark returns
df = self.bench.get_series(benchnames, 'ret', end=self.max_date)
retdates = _astype(self.perf.loc[d, ['begret','endret']].values, int)
for b in benchnames:
p[b] = _as_compound(df[b], retdates)
# compute time-series regression results
rhs = ' + '.join([f"Q('{b}')" for b in benchnames])
r = smf.ols(f"{self.label} ~ {rhs}", data=p).fit()
r = r.get_robustcov_results(cov_type='HAC', use_t=None, maxlags=haclags)
pre2002 = p.loc[p.index < 20020101, self.label]
post2002 = p.loc[p.index >= 20020101, self.label]
welch = scipy.stats.ttest_ind(post2002, pre2002, equal_var=False)
mult = (len(p) - 1) / nyears
self.annualized = {
'excess': mult * np.mean(p[self.label]),
'sharpe': np.sqrt(mult)*p[self.label].mean() / p[self.label].std(),
'jensen': mult * r.params[0],
'appraisal': np.sqrt(mult) * r.params[0] / np.std(r.resid),
'welch-t': welch[0],
'welch-p': welch[1],
'turnover': np.mean(self.perf.loc[d, ['buys','sells']]\
.abs().values) * mult / 2,
'longs': self.perf.loc[d, 'longs'].mean(),
'shorts': self.perf.loc[d, 'shorts'].mean(),
'buys': mult * self.perf.loc[d, 'buys'].mean() / 2,
'sells': mult * self.perf.loc[d, 'sells'].mean() / 2}
self.results = r
self.excess = p
return self.excess
def plot(self, num=1, flip=False, drawdown=False, figsize=(10,12)):
"""Plot time series of excess vs benchmark returns
Parameters
----------
num : int, optional
Figure number to use in plt
        flip: bool, default False
            If True, flip the sign of the returns (the label is prefixed with
            'MINUS'); if False, plot the returns as-is
drawdown: bool, default False
If True, plot peak and trough points of max (additive) drawdown
"""
if flip:
label = 'MINUS ' + self.label
m = -1
else:
label = self.label
m = 1
if self.excess is None: # attribution was not run
excess = m * self.perf[['excess']].rename(columns={'excess': label})
perf = self.perf
else:
excess = self.excess.rename(columns={'excess': label})
excess[label] *= m
perf = self.perf[(self.perf.index >= self.excess.index[0]) &
(self.perf.index <= self.excess.index[-1])]
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, clear=True,
figsize=figsize, num=num)
plot_date(y1=excess.cumsum(), label1='cumulative ret', marker=None,
ax=ax1, points=self.drawdown() if drawdown else None)
plot_date(y1=perf[['longs','shorts']],
y2=(perf['buys'] + perf['sells']) / 4,
ax=ax2, marker=None, ls1='-', ls2=':', cn=excess.shape[1],
label1='#holdings', label2='turnover', legend2=['turnover'])
plt.tight_layout(pad=3)
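# Illustrative sketch only: `sql`, `bench`, `crsp` and `holdings` stand in for
# previously constructed database, benchmark, stock and weights objects, which
# are not defined in this module.
def _example_backtest(sql, bench, crsp, holdings):
    backtest = BackTest(sql, bench, 'RF', 20200930)      # as in the class docstring
    perf = backtest(crsp, holdings, label='example')     # holding-period returns
    excess = backtest.fit(['Mkt-RF'])                     # attribution vs market factor
    backtest.plot(drawdown=True)                          # cumulative excess returns
    backtest.write('example')                             # persist to user database
    return perf, excess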
class EventStudy(Structured):
"""Class to support statistical tests of event studies
Parameters
----------
sql : SQL object
connection to user database to store results
bench : Benchmarks structured data object
to retrieve benchmark market returns
max_date : int
last date to run event study (not used)
table : str, default is 'events'
name of table in user database to store results in
"""
def __init__(self, sql, bench, max_date, table='events'):
"""Initialize for event study calculations"""
tables = {'events':
sql.Table(table,
Column('permno', String(32), primary_key=True),
Column('name', String(32), primary_key=True),
Column('beg', Integer),
Column('end', Integer),
Column('rows', Integer),
Column('days', Integer),
Column('effective', Float),
Column('window', Float),
Column('window_t', Float),
Column('post', Float),
Column('post_t', Float))}
super().__init__(sql, bench.bd, tables, 'permno', name='events')
self.bench = bench
self.max_date = max_date
self.ar_ = {}
self.ret_ = {}
def write(self, label, overwrite=True):
"""Save event study summary to database"""
self['events'].create(checkfirst=True)
if overwrite:
delete = self['events'].delete().where(
self['events'].c['permno'] == label)
self.sql.run(delete)
ar = DataFrame.from_dict(self.ar_, orient='index')
ar['permno'] = label
self.sql.load_dataframe(self['events'].key, ar)
return ar
def read(self, label=None, name=None):
"""Load event study summary from database"""
where = ' and '.join([f"{k} = '{v}'" for k,v in
[['permno', label], ['name', name]] if v])
q = "SELECT * from {table} {where}".format(
table=self['events'].key,
where="where " + where if where else '')
return self.sql.read_dataframe(q)
def __call__(self, stocks, df, left, right, post, date_field):
"""Retrieve event window market-adjusted returns where valid/available
Parameters
----------
stocks : Structured object
Stock returns data
df : DataFrame
Input list of stocks of identifiers and event dates
left, right, post : int
left and right (inclusive) window and post-drift date offsets
date_field : str
Name of date column in df
"""
ret = stocks.get_window(
dataset='daily',
field='ret',
permnos=df[stocks.identifier],
dates=df[date_field],
date_field='date',
left=left,
right=post)
cols = list(range(post-left+1))
# require at least window and one post-event returns available
rets = ret[ret[cols[:(right-left+2)]].notna().all(axis=1)]
rets.index = np.arange(len(rets))
# get market returns for market-model adjustment
mkt = self.bench.get_window(dataset='daily',
field='ret',
permnos=['Mkt-RF'] * len(rets),
date_field='date',
dates=rets['date'],
left=left,
right=post)
rf = self.bench.get_window(dataset='daily',
field='ret',
permnos=['RF'] * len(rets),
date_field='date',
dates=rets['date'],
left=left,
right=post)
mkt = (mkt[cols] + rf[cols]).reset_index(drop=True)
ar = (rets[cols] - mkt[cols]).cumsum(axis=1).fillna(0)
br = ((1 + rets[cols]).cumprod(axis=1) -
(1 + mkt[cols]).cumprod(axis=1)).fillna(0)
self.car = rets[['permno', 'date']].join(ar)
self.bhar = rets[['permno', 'date']].join(br)
self.left = left
self.right = right
self.post = post
self.rows = rets[['permno', 'date']]
return self.rows
def fit(self, rows=None, car=False, name='event_', rho=0.3):
"""Compute CAR/BHAR statistics from cumulative rets
Parameters
----------
rows : list of int, default is None
Subset of rows to evaluate; None selects all rows
car : bool, default is False
Whether to evaluate CAR or BHAR
name : str, default is None
save results in cache by this label
rho : float between 0 and 1, default is 0.3
rule-of-thumb to adjust stderrs for cross-correlated errors
Returns
-------
dict :
summary statistics of full and subsamples:
'window', 'window-tvalue' are CAR at end of event window
'post', 'post-tvalue' are CAR from end of event till post-drift end
'car', 'car-stderr' are daily CAR from beginning of event
'rows', 'days' are number of stocks, and after groupby dates
"""
window = self.right - self.left + 1
cols = ['date'] + list(range(self.post-self.left+1))
rets = (self.car if car else self.bhar)[cols]
cumret = (rets if rows is None else rets.iloc[rows])
n = int(len(cumret))
b = int(min(cumret['date']))
e = int(max(cumret['date']))
cumret = cumret.groupby('date').mean()
means = cumret.mean()
L = self.post-self.left
overlap = ((len(cumret) * (L+1))/(len(self.bd.date_range(b,e)) + L)) - 1
effective = len(cumret) / (1 + (rho * min(max(overlap, 0), L)))
stderr = cumret.std() / np.sqrt(effective)
posterr = ((cumret.iloc[:, window:]\
.sub(cumret.iloc[:, window-1], axis=0)).std()
/ np.sqrt(effective))
        tstat = means[window - 1] / stderr[window - 1]
        post = cumret.iloc[:, -1] - cumret.iloc[:, window - 1]
        post_sem = post.std() / np.sqrt(effective)
        ar = Series({'name' : name,
                     'window' : means[window - 1],
                     'window_t' : tstat,
'post' : post.mean(),
'post_t' : post.mean() / post_sem,
'beg' : b,
'end' : e,
'effective' : int(effective),
'days' : len(cumret),
'rows' : n})
self.ret_[name] = {'means' : means.values,
'stderr' : stderr.values,
'posterr' : posterr.values,
'car' : car}
self.ar_[name] = ar.copy()
return ar
def plot(self, name='event_', drift=False, ax=None,
loc='best', title='', c='C0', vline=None, hline=None, width=1.96):
"""Plot cumulative abnormal returns, drift and confidence bands"""
ax = ax or plt.gca()
window = self.right - self.left + 1
if vline is None:
vline = self.right
if hline is None:
hline = self.ret_['event_']['means'][window-1] if drift else 0
r = self.ret_[name]
ar = self.ar_[name]
plot_bands([0] + list(r['means']),
[0] + ([0] * (window if drift else 0))
+ list(r['posterr' if drift else 'stderr']),
x=np.arange(self.left-1, self.post+1), loc=loc,
hline=hline, vline=vline, title=title, c=c, width=width,
legend=["CAR" if r['car'] else "BHAR", f"{width} stderrs"],
xlabel=(f"{int(ar['beg'])}-{int(ar['end'])}"
f" (dates={int(ar['days'])}, n={int(ar['rows'])})"),
ylabel="CAR" if r['car'] else "BHAR", ax=ax)
plt.tight_layout(pad=3)
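# Illustrative sketch only: `sql`, `bench` and `crsp` are hypothetical stand-ins,
# and `announcements` is assumed to be a DataFrame with the stock identifier
# column and an event-date column named 'anndate'.
def _example_event_study(sql, bench, crsp, announcements):
    evs = EventStudy(sql, bench, max_date=20201231)
    evs(crsp, announcements, left=-1, right=1, post=21, date_field='anndate')
    summary = evs.fit(car=False)        # BHAR window and post-drift statistics
    evs.plot(drift=True)                # cumulative abnormal returns with bands
    evs.write('example')
    return summary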
def wald_test(R, r, theta, avar):
"""helper method to compute wald test of linear hypotheses
Parameters
----------
R : (Q x P) array of float
input coefficients to test hypothesis that R theta = r
r : (Q x 1) vector of float
input constants to test hypotheses that R theta = r
theta : (P x 1) vector of float
estimated model parameters
avar : (P x P) array of float
Returns
-------
stats : dict
'wald': wald test statistic, 'p-value': of chi-square with df=Q
"""
theta = theta.reshape(-1, 1) # P x 1 parameter estimates
r = r.reshape(-1, 1) # Q x 1 hypotheses
R = R.reshape(len(r), len(theta)) # Q x P coefficients
wald = (R @ theta - r).T @ np.linalg.inv(R @ avar @ R.T) @ (R @ theta - r)
w = wald.reshape(-1)[0]
return {'wald': w, 'p-value': 1 - scipy.stats.chi2.cdf(w, len(r))}
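# Small numeric check of wald_test (made-up numbers): test the single linear
# restriction H0: theta_1 - theta_2 = 0 given parameter estimates `theta` and
# an assumed asymptotic covariance matrix `avar`.
def _example_wald_test():
    theta = np.array([0.5, 0.3])
    avar = np.array([[0.04, 0.01],
                     [0.01, 0.09]])
    R = np.array([[1.0, -1.0]])    # Q x P coefficient matrix, Q = 1
    r = np.array([0.0])            # Q x 1 constants
    return wald_test(R, r, theta, avar)   # {'wald': ..., 'p-value': ...}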
class RiskPremium:
"""Class to support statistical tests of factor loading premiums
Parameters
----------
sql : SQL object
connection to user database to store results
bench : Benchmarks structured data object
to retrieve benchmark market returns
rf : str
series name of riskfree rate from bench database
end : int
last date to run event study (not used)
"""
def __init__(self, sql, bench, rf, end):
"""Initialize for testing factor loading premiums"""
self.sql = sql
self.bench = bench
self.rf = bench.get_series([rf], 'ret', end=end)[rf]
rf = bench.get_series([rf + "(mo)"], 'ret', end=end) # monthly riskfree
self.monthly_ = {(bench.bd.begmo(d), bench.bd.endmo(d)):
float(rf.loc[d]) for d in rf.index}
self.end_ = end
def __call__(self, stocks, loadings, weights=None, standardize=[]):
"""Estimate factor risk premiums with cross-sectional FM regressions
Parameters
----------
stocks : Structured data object
From which to retrieve stocks' returns data
loadings : dict, keyed by rebalance date:int, of DataFrame
DataFrames indexed by stocks permno, with columns of loadings values
standardize : list of str, default is []
List of column labels to demean and rescale (eql-wtd stdev = 1)
weights : str, default is None
Column for weighted least squares, and weighted demean
Returns
-------
ret : DataFrame
means and standard errors of FM regression coefficients/premiums
"""
self.perf = DataFrame()
pordates = sorted(list(loadings.keys()))
self.holdrets = stocks.bd.holding_periods(pordates)
for pordate, holdrets in zip(pordates[:-1], self.holdrets):
if holdrets in self.monthly_:
rf = self.monthly_[holdrets]
else:
rf = _as_compound(self.rf, holdrets)
df = loadings[pordate]
if weights is None:
w = np.ones(len(df))
else:
w = df[weights].to_numpy()
df = df.drop(columns=[weights])
x = df.columns
for col in standardize: # weighted mean <- 0, equal wtd stdev <- 1
df[col] -= np.average(df[col], weights=w)
df[col] /= np.std(df[col])
df = df.join(stocks.get_ret(*holdrets, delist=True)-rf, how='left')
p = least_squares(df.dropna(), x=x, y='ret', add_constant=False)
p.name = holdrets[1]
self.perf = self.perf.append(p)
self.results = {'mean': self.perf.mean(), 'stderr': self.perf.sem(),
'std': self.perf.std(), 'count': len(self.perf)}
return DataFrame(self.results).T
def fit(self, benchnames=None):
"""Compute risk premiums and benchmark correlations"""
out = []
if benchnames:
df = self.bench.get_series(benchnames, 'ret')
b = DataFrame({k: _as_compound(df[k], self.holdrets)
for k in benchnames}, index=self.perf.index)
out.append(DataFrame({'mean': b.mean(), 'stderr': b.sem(),
'std': b.std(), 'count': len(b)}).T\
.rename_axis('Benchmark Returns', axis=1))
corr = b.join(self.perf).corr()
out.append(corr.loc[benchnames, benchnames].rename_axis(
'Correlation of Benchmark Returns', axis=1))
out.append(corr.loc[self.perf.columns, benchnames].rename_axis(
'Correlation of Estimated Factor and Benchmark Returns', axis=1))
else:
corr = self.perf.corr()
out.append( | DataFrame(self.results) | pandas.DataFrame |
'''Tests for regressionplots, entire module is skipped
'''
import numpy as np
import nose
import statsmodels.api as sm
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid)
from pandas import Series, DataFrame
try:
    import matplotlib.pyplot as plt  # makes plt available for test functions
    have_matplotlib = True
except ImportError:
    have_matplotlib = False
def setup():
if not have_matplotlib:
raise nose.SkipTest('No tests here')
def teardown_module():
plt.close('all')
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
plt.close(fig)
def test_plot_oth(self):
#just test that they run
res = self.res
endog = res.model.endog
exog = res.model.exog
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
plot_ccpr_grid(res, exog_idx=[0,1])
plt.close('all')
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = | Series(y, name="outcome") | pandas.Series |
"""
This script takes outputs from distances2.py and bins the data, producing a final results table
ready to be plotted as a histogram.
<NAME>
February 6th 2017
Copyright 2017 Harvard University, Wu Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import pandas as pd
import numpy as np
def get_args():
parser = argparse.ArgumentParser(description="Description")
parser.add_argument('-q', "--queryToRef", type=str,
help='Filename for the query to ref distances')
parser.add_argument('-f', '--filenames', type=str,
help='File containing filenames of files containing query to random distances')
parser.add_argument('-rl', '--refLabel', type=str,
help='Label for reference')
parser.add_argument('-ql', '--queryLabel', type=str,
help='Label for query')
parser.add_argument('-b', '--bins', type=int, default=50,
help='Number of bins to aggregate into')
parser.add_argument('-x', '--xLims', type=int, nargs=2, default=(0, 20000),
help='Upper and lower limits of bins for data aggregation, supply two integers, separated by'
'a space')
return parser.parse_args()
def get_Filenames_file(strFilenames):
"""
Obtain a list of the filenames of the files to process
"""
with open(strFilenames, 'r') as Filenames_filehandle:
aFiles = [line.strip() for line in Filenames_filehandle]
return aFiles
def getDataIntoPandasNoHeader(strFilename, arHeaders):
#This is for files without headers
#print 'Getting data from {0}'.format(strFilename)
pdData = pd.read_csv(strFilename, sep='\t', header=None, names=arHeaders)
return pdData
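# Example call (hypothetical filename and column names): read a headerless,
# tab-separated distances file into a DataFrame with named columns, e.g.
# pdQueryToRef = getDataIntoPandasNoHeader('query_to_ref_distances.txt',
#                                          ['chrom', 'start', 'end', 'distance'])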
def getSeriesWithBinsIndex(npHistDensityY, npHistDensityX):
#Round the numbers
npHistXRounded = np.around(npHistDensityX, decimals=3)
#Get into list
arHistXRounded = npHistXRounded.tolist()
    #Get rid of the last bin edge - there is always one more bin edge than there are bins, and you want
    #to make the bin edges into labels (or indexes) for the counts, so you just drop the last one.
#e.g. bin 0 to 0.1 is now just called bin 0, bin 0.49 to 0.5 is called bin 0.49
arHistXRounded.pop()
#Make into a series
seriesHistDensity = | pd.Series(npHistDensityY, index=arHistXRounded) | pandas.Series |
import logging
logging.basicConfig(level=logging.WARNING)
import pytest
import numpy
import os
import pypipegraph as ppg
import pandas as pd
from pathlib import Path
from pandas.testing import assert_frame_equal
import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol.testing import assert_image_equal
from mbf_sampledata import get_sample_data
import mbf_genomics.regions as regions
from mbf_genomics.annotator import Constant, Annotator
from .shared import (
get_genome,
get_genome_chr_length,
force_load,
inside_ppg,
run_pipegraph,
RaisesDirectOrInsidePipegraph,
MockGenome,
)
dp, X = dppd.dppd()
@pytest.mark.usefixtures("new_pipegraph")
class TestGenomicRegionsLoadingPPGOnly:
def test_dependency_passing(self):
job = ppg.ParameterInvariant("sha", (None,))
a = regions.GenomicRegions("shu", lambda: None, [job], get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_dependency_may_be_iterable_instead_of_list(self):
job = ppg.ParameterInvariant("shu", (None,))
a = regions.GenomicRegions("shu", lambda: None, (job,), get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_depenencies_must_be_jobs(self):
ppg.ParameterInvariant("shu", (None,))
with pytest.raises(ValueError):
regions.GenomicRegions("shu", lambda: None, ["shu"], get_genome())
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
class TestGenomicRegionsLoading:
def test_raises_on_duplicate_name(self, both_ppg_and_no_ppg):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
regions.GenomicRegions("shu", sample_data, [], get_genome())
if inside_ppg():
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, [], get_genome())
both_ppg_and_no_ppg.new_pipegraph()
regions.GenomicRegions(
"shu", sample_data, [], get_genome()
) # should not raise
def test_raises_on_non_iterable_dependencies(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, "aaeu", get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, 1, get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, iter([]), get_genome())
def test_loading(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
else:
assert hasattr(a, "df")
run_pipegraph()
assert hasattr(a, "df")
assert len(a.df) == 1
assert "chr" in a.df.columns
assert "start" in a.df.columns
assert "stop" in a.df.columns
def test_filtering_copy_anno(self, clear_annotators):
import mbf_genomics
def sample_data():
return pd.DataFrame(
{
"chr": "Chromosome",
"start": [1000, 1001, 1002],
"stop": [1100, 1101, 1102],
}
)
a = regions.GenomicRegions(
"sha", sample_data, [], get_genome(), on_overlap="ignore"
)
b = a.filter("filtered", ("start", "==", 1001))
class CopyAnno(mbf_genomics.annotator.Annotator):
def __init__(self):
self.columns = ["copy"]
def calc(self, df):
return pd.DataFrame({"copy": df["start"]})
a += CopyAnno()
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
force_load(b.annotate())
else:
assert hasattr(a, "df")
run_pipegraph()
print(b.df)
assert (b.df["start"] == [1001]).all()
assert (b.df["copy"] == [1001]).all()
def test_raises_on_invalid_on_overlap(self):
def inner():
regions.GenomicRegions(
"shu",
lambda: None,
[],
get_genome(),
on_overlap="run in circles all about",
)
with pytest.raises(ValueError):
inner()
def test_magic(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("shu", sample_data, [], get_genome())
hash(a)
str(a)
repr(a)
bool(a)
a.load()
run_pipegraph()
with pytest.raises(TypeError):
iter(a)
def test_loading_missing_start(self):
def sample_data():
return pd.DataFrame({"chr": "1", "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_chr(self):
def sample_data():
return pd.DataFrame({"start": [1000], "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_stop(self):
def sample_data():
return pd.DataFrame({"chr": "Chromosome", "start": [1200]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_invalid_chromosome(self):
def sample_data():
return pd.DataFrame({"chr": ["1b"], "start": [1200], "stop": [1232]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_int_start(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": ["shu"], "stop": [1232]}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_int_stop(self):
def sample_data():
return pd.DataFrame({"chr": ["Chromosome"], "start": [2], "stop": [20.0]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_str_chr(self):
def sample_data():
return | pd.DataFrame({"chr": [1], "start": [2], "stop": [20]}) | pandas.DataFrame |
import warnings
from typing import Union
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA, KernelPCA, IncrementalPCA, FastICA, SparsePCA
try:
from PyEMD import EMD, EEMD
except ModuleNotFoundError:
EMD, EEMD = None, None
from ai4water.utils.utils import dateandtime_now
from ._transformations import MinMaxScaler, PowerTransformer, QuantileTransformer, StandardScaler
from ._transformations import FunctionTransformer, RobustScaler, MaxAbsScaler
# TODO add logistic, tanh and more scalers.
# rpca
# tSNE
# UMAP
# PaCMAP
# which transformation to use? Some related articles/posts
# https://scikit-learn.org/stable/modules/preprocessing.html
# http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-16.html
# https://data.library.virginia.edu/interpreting-log-transformations-in-a-linear-model/
class EmdTransformer(object):
"""Empirical Mode Decomposition"""
def __init__(self, ensemble=False, **kwargs):
self.ensemble = ensemble
if ensemble:
self.emd_obj = EEMD(**kwargs)
else:
self.emd_obj = EMD(**kwargs)
def fit_transform(self, data, **kwargs):
if isinstance(data, pd.DataFrame):
data = data.values
else:
assert isinstance(data, np.ndarray)
assert len(data.shape) == 2
imfs = []
for col in range(data.shape[1]):
if self.ensemble:
IMFs = self.emd_obj.eemd(data[:, col], **kwargs)
else:
IMFs = self.emd_obj.emd(data[:, col], **kwargs)
imfs.append(IMFs.T)
return np.concatenate(imfs, axis=1)
def inverse_transform(self, **kwargs):
raise NotImplementedError
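# Minimal sketch (assumes the optional PyEMD dependency is installed and that
# `signals` is a 2-D numpy array or DataFrame of signal columns):
def _example_emd(signals):
    emd_tr = EmdTransformer(ensemble=False)
    imfs = emd_tr.fit_transform(signals)   # IMFs of every column, concatenated
    return imfs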
class scaler_container(object):
def __init__(self):
self.scalers = {}
self.transforming_straight = True
self.zero_indices = None
self.nan_indices = None
self.index = None
class Transformations(scaler_container):
"""
Applies transformation to tabular data.
    Any new transforming methods should define two methods, one starting with
    `transform_with_` and the other with `inverse_transform_with_`
https://developers.google.com/machine-learning/data-prep/transform/normalization
Currently following methods are available for transformation and inverse transformation
- minmax :
- maxabs :
- robust :
- power :
- zscore : also known as standard scalers
- quantile :
    - log : natural logarithm
- log10 : log with base 10
- log2 : log with base 2
- tan : tangent
    - cumsum : cumulative sum
    - pca : principal component analysis
    - kpca : kernel principal component analysis
    - ipca : incremental principal component analysis
    - fastica : fast independent component analysis
Following methods have only transformations and not inverse transformations.
They can be used for feature creation.
emd : empirical mode decomposition
eemd : ensemble empirical mode decomposition
    To transform a dataframe using any of the above methods use
```python
>>>scaler = Transformations(data=[1,2,3,5], method='zscore')
>>>scaler.transform()
```
or
```python
>>>scaler = Transformations(data=pd.DataFrame([1,2,3]))
>>>normalized_df, scaler_dict = scaler.transform_with_minmax(return_key=True)
```
or
```python
>>>scaler = Transformations(data=pd.DataFrame([1,2,3]), method='minmax')
>>>normalized_df, scaler_dict = scaler()
```
or using one liner
```python
>>>normalized_df, scaler = Transformations(data=pd.DataFrame([[1,2,3],[4,5,6]], columns=['a', 'b']),
... method='log', features=['a'])('transform')
```
where `method` can be any of the above mentioned methods.
Note: `tan` and `cumsum` do not return original data upon inverse transformation.
Same holds true for methods which causes change in dimension
"""
available_transformers = {
"minmax": MinMaxScaler,
"zscore": StandardScaler,
"robust": RobustScaler,
"maxabs": MaxAbsScaler,
"power": PowerTransformer,
"quantile": QuantileTransformer,
"pca": PCA,
"kpca": KernelPCA,
"ipca": IncrementalPCA,
"fastica": FastICA,
"sparsepca": SparsePCA,
"emd": EmdTransformer,
"eemd": EmdTransformer,
}
dim_expand_methods = ['emd', 'eemd']
dim_red_methods = ["pca", "kpca", "ipca", "fastica", "sparsepca"] # dimensionality reduction methods
mod_dim_methods = dim_red_methods + dim_expand_methods
def __init__(self,
data: Union[pd.DataFrame, np.ndarray, list],
method: str = 'minmax',
features: list = None,
replace_nans: bool = False,
replace_with: Union[str, int, float] = 'mean',
replace_zeros: bool = False,
replace_zeros_with: Union[str, int, float] = 'mean',
**kwargs
):
"""
Arguments:
data : a dataframe or numpy ndarray or array like. The transformed or inversely
transformed value will have the same type as data and will have
the same index as data (in case data is dataframe).
            method : method by which to transform and consequently inversely
                transform the data. Default is 'minmax'. See
                `Transformations.available_transformers` for the full list.
features : string or list of strings. Only applicable if `data` is
dataframe. It defines the columns on which we want to apply transformation.
The remaining columns will remain same/unchanged.
replace_nans : If true, then will replace the nan values in data with
some fixed value `replace_with` before transformation. The nan
values will be put back at their places after transformation so
this replacement is done only to avoid error during transformation.
However, the process of putting the nans back does not happen when
                the `method` results in a dimension change, such as for PCA etc.
            replace_with : if replace_nans is True, then this value will be used
                to replace nans in the dataframe before doing the transformation. You can
                define the method with which to replace nans; for example, setting
                this argument to 'mean' will replace nans with the 'mean' of the
                array/column which contains the nans. Allowed string values are
                'mean', 'max', 'min'.
replace_zeros : same as replace_nans but for zeros in the data.
            replace_zeros_with : same as `replace_with` but for zeros in the data.
kwargs : any arguments which are to be provided to transformer on
                INITIALIZATION and not during transform or inverse transform e.g.
`n_components` for pca.
Example
---------
```python
>>>from ai4water.preprocessing.transformations import Transformations
>>>from ai4water.datasets import arg_beach
>>>df = arg_beach()
>>>inputs = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>>transformer = Transformations(data=df[inputs], method='minmax', features=['sal_psu', 'air_temp_c'])
>>>new_data = transformer.transform()
```
Following shows how to apply log transformation on an array containing zeros
by making use of the argument `replace_zeros`. The zeros in the input array
will be replaced internally but will be inserted back afterwards.
```python
>>>from ai4water.preprocessing.transformations import Transformations
>>>transformer = Transformations([1,2,3,0.0, 5, np.nan, 7], method='log', replace_nans=True, replace_zeros=True)
>>>transformed_data = transformer.transform()
... [0.0, 0.6931, 1.0986, 0.0, 1.609, None, 1.9459]
>>>original_data = transformer.inverse_transform(data=transformed_data)
```
"""
super().__init__()
self.method = method
self.replace_nans = replace_nans
self.replace_with = replace_with
self.replace_zeros = replace_zeros
self.replace_zeros_with = replace_zeros_with
data = self.pre_process_data(data.copy())
self.data = data
self.features = features
self.initial_shape = data.shape
self.kwargs = kwargs
self.transformed_features = None
def __call__(self, what="transform", return_key=False, **kwargs):
"""
Calls the `transform` and `inverse_transform` methods.
"""
if what.upper().startswith("TRANS"):
self.transforming_straight = True
return self.transform(return_key=return_key, **kwargs)
elif what.upper().startswith("INV"):
self.transforming_straight = False
return self.inverse_transform(**kwargs)
else:
raise ValueError(f"The class Transformation can not be called with keyword argument 'what'={what}")
def __getattr__(self, item):
"""
Gets the attributes from underlying transformation modules.
"""
if item.startswith('_'):
return self.__getattribute__(item)
elif item.startswith("transform_with"):
transformer = item.split('_')[2]
if transformer.lower() in list(self.available_transformers.keys()) + ["log",
"tan", "cumsum", "log10", "log2"]:
self.method = transformer
return self.transform_with_sklearn
elif item.startswith("inverse_transform_with"):
transformer = item.split('_')[3]
if transformer.lower() in list(self.available_transformers.keys()) + ["log",
"tan", "cumsum", "log10", "log2"]:
self.method = transformer
return self.inverse_transform_with_sklearn
else:
            raise AttributeError(f'Transformations has no attribute {item}')
@property
def data(self):
return self._data
@data.setter
def data(self, x):
if isinstance(x, pd.DataFrame):
self._data = x
else:
assert isinstance(x, np.ndarray)
xdf = pd.DataFrame(x, columns=['data'+str(i) for i in range(x.shape[1])])
self._data = xdf
@property
def features(self):
return self._features
@features.setter
def features(self, x):
if x is None:
x = list(self.data.columns)
assert len(x) == len(set(x)), f"duplicated features are not allowed. Features are: {x}"
self._features = x
@property
def transformed_features(self):
return self._transformed_features
@transformed_features.setter
def transformed_features(self, x):
self._transformed_features = x
@property
def num_features(self):
return len(self.features)
@property
def change_dim(self):
if self.method.lower() in self.mod_dim_methods:
return True
else:
return False
def get_scaler(self):
return self.available_transformers[self.method.lower()]
def pre_process_data(self, data):
"""Makes sure that data is dataframe and optionally replaces nans"""
if isinstance(data, pd.DataFrame):
data = data
else:
data = np.array(data)
if data.ndim == 1:
data = data.reshape(-1, 1)
assert isinstance(data, np.ndarray)
data = pd.DataFrame(data, columns=['data'+str(i) for i in range(data.shape[1])])
# save the index if not already saved so that can be used later
if self.index is None:
self.index = data.index
if self.replace_nans:
indices = {}
for col in data.columns:
# find index of nan values in current column of data
# https://stackoverflow.com/questions/14016247/find-integer-index-of-rows-with-nan-in-pandas-dataframe
i = data[col].index[data[col].apply(np.isnan)]
if len(i) > 0:
indices[col] = i.values
# replace nans with values
if self.replace_with in ['mean', 'max', 'min']:
replace_with = float(getattr(np, 'nan'+self.replace_with)(data[col]))
else:
replace_with = self.replace_with
data[col][indices[col]] = get_val(data[col], replace_with)
# because pre_processing is implemented 2 times, we don't want to overwrite nan_indices
if self.nan_indices is None: self.nan_indices = indices
if len(indices) > 0:
if self.method.lower() == "cumsum":
warnings.warn("Warning: nan values found and they may cause problem")
if self.replace_zeros and self.transforming_straight:
indices = {}
for col in data.columns:
# find index containing 0s in corrent column of dataframe
i = data.index[data[col] == 0.0]
if len(i) > 0:
indices[col] = i.values
if self.replace_zeros_with in ['mean', 'max', 'min']:
replace_with = float(getattr(np, 'nan' + self.replace_zeros_with)(data[col]))
else:
replace_with = self.replace_zeros_with
data[col][indices[col]] = get_val(data[col], replace_with) # todo SettingWithCopyWarning
if self.zero_indices is None:
self.zero_indices = indices
# if self.replace_negatives:
# indices = {}
# for col in data.columns:
# # find index containing negatives in corrent column of dataframe
# i = data.index[data[col] < 0.0]
# if len(i) > 0:
# indices[col] = i.values
# if self.replace_negatives_with in ['mean', 'max', 'min']:
# replace_with = float(getattr(np, 'nan' + self.replace_negatives_with)(data[col]))
# else:
# replace_with = self.replace_negatives_with
# data[col][indices[col]] = get_val(data[col], replace_with)
#
# if self.negative_indices is None: self.negative_indices = indices
return data
def post_process_data(self, data):
"""If nans/zeros were replaced with some value, put nans/zeros back."""
if self.method not in self.dim_red_methods:
if self.replace_nans:
if hasattr(self, 'nan_indices'):
for col, idx in self.nan_indices.items():
data[col][idx] = np.nan
if self.replace_zeros:
if hasattr(self, 'zero_indices'):
for col, idx in self.zero_indices.items():
data[col][idx] = 0.0
# if self.replace_negatives:
# if hasattr(self, 'negative_indices'):
# for col, idx in self.negative_indices.items():
# data[col][idx] = 0.0
return data
def transform_with_sklearn(self, return_key=False, **kwargs):
to_transform = self.get_features() # TODO, shouldn't kwargs go here as input?
if self.method.lower() in ["log", "log10", "log2"]:
if (to_transform.values < 0).any():
raise InvalidValueError(self.method, "negative")
if (np.isnan(to_transform.values).sum() > 0).any():
raise InvalidValueError(self.method, "NaN")
if 0 in to_transform.values:
raise InvalidValueError(self.method, "zero")
if self.method == "log":
scaler = FunctionTransformer(func=np.log, inverse_func=np.exp, validate=True, check_inverse=True)
elif self.method == "log2":
scaler = FunctionTransformer(func=np.log2, inverse_func="""lambda x: 2**x""", validate=True,
check_inverse=True)
else: # "log10":
scaler = FunctionTransformer(func=np.log10, inverse_func="""lambda x: 10**x""", validate=True,
check_inverse=True)
elif self.method.lower() == "tan":
scaler = FunctionTransformer(func=np.tan, inverse_func=np.tanh, validate=True, check_inverse=False)
elif self.method.lower() == "cumsum":
scaler = FunctionTransformer(func=np.cumsum, inverse_func=np.diff, validate=True, check_inverse=False,
kw_args={"axis": 0}, inv_kw_args={"axis": 0, "append": 0})
else:
scaler = self.get_scaler()(**self.kwargs)
data = scaler.fit_transform(to_transform, **kwargs)
if self.method.lower() in self.mod_dim_methods:
features = [self.method.lower() + str(i+1) for i in range(data.shape[1])]
data = pd.DataFrame(data, columns=features)
self.transformed_features = features
self.features = features
else:
data = pd.DataFrame(data, columns=to_transform.columns)
scaler = self.serialize_scaler(scaler, to_transform)
data = self.maybe_insert_features(data)
data = self.post_process_data(data)
self.tr_data = data
if return_key:
return data, scaler
return data
def inverse_transform_with_sklearn(self, **kwargs):
self.transforming_straight = False
scaler = self.get_scaler_from_dict(**kwargs)
to_transform = self.get_features(**kwargs)
data = scaler.inverse_transform(to_transform)
if self.method.lower() in self.mod_dim_methods:
# now use orignal data columns names, but if the class is being directly called for inverse transform
# then we don't know what cols were transformed, in that scenariio use dummy col name.
cols = ['data'+str(i) for i in range(data.shape[1])] if self.transformed_features is None else self.data.columns
data = pd.DataFrame(data, columns=cols)
else:
data = | pd.DataFrame(data, columns=to_transform.columns) | pandas.DataFrame |
#########################################################################
#########################################################################
# Classes for handling genome-wide association input and output files, ##
# analysis and qc programs, and post-hoc analyses ##
#########################################################################
#########################################################################
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import re
import random
import os
import subprocess
import rpy2.robjects as ro
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri as py2ri
# set matplotlib non-interactive backend to Agg to
# allow running on cluster
import collections
import sqlite3 as sql
from math import *
import scipy.stats as stats
class FileGroup(object):
'''
An object for holding, formatting and processing files for genome-wide
association analysis including compressed and binary files
File types supported:
* plink - .ped and .map files
* plink binary - .bim, .fam. and .bed files
* variant call format - .vcf and .bcf (including gzipped vcf)
* Oxford format - .gen or .bgen with matched sample text file (must
be .sample)
* GRM_binary - genetic relationship matrix calculated in an appropriate
program in binary format. File suffixes are *.grm.bin, *.grm.N.bin
      and *.grm.id
    * GRM_gz - previously calculated gzip compressed GRM, file suffixes
are *.grm.gz and *.grm.id
Phenotypes are assumed to be contained in the relevant files, if not
then an additional phenotypes files can be included using the
`phenotypes` argument. Covariate files (if different from the phenotypes
file) can also be included in the instantiation of a :FileGroup:
object using the `covarite_files` argument.
Only the `files` and `file_format` arguments are required.
Genotype data are assumed to be raw genotype calls. This can be modified
using the `genotype_format` argument upon instantiation. Values allowed
are:
* calls - standard bi-allelic genotype calls, i.e. AA, AB, BB
* imputed_call - discrete genotype calls from imputed data,
essentially treated the same as ``calls``
* genotype_prob - posterior probabilities for each genotype class,
i.e. 0.88 0.07 0.05 corresponding to homozygote
reference, heterozygote then homozygote rare allele.
'''
# Defaults for file formats
ped_file = None
map_file = None
bim_file = None
fam_file = None
bed_file = None
sample_file = None
gen_file = None
bgen_file = None
vcf_file = None
bcf_file = None
def __init__(self, files, file_format, phenotypes=None,
genotype_format="calls", covariate_files=None):
self.files = files
self.file_format = file_format
self.pheno_file = phenotypes
self.genotype_format = genotype_format
self.covariate_files = covariate_files
self.set_file_prefix(files)
def set_file_prefix(self, infiles):
'''Get file prefixes from input files. These are used across all
file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile.
Only use periods, '.' to denote file suffixes. use hyphens and
underscores for separating file names.
Set these to the appropriate attributes.
'''
file_prefixes = set()
for f in infiles:
# get all input file prefixes
if len(f.split("/")) > 1:
g = f.split("/")[-1]
fdir = f.split("/")[:-1]
fdir = "/".join(fdir)
ffile = fdir + "/" + g.split(".")[0]
file_prefixes.add(ffile)
else:
file_prefixes.add(f.split(".")[0])
# if only prefix then use this for all data files
if len(file_prefixes) == 1:
self.name = [xf for xf in file_prefixes][0]
else:
# if there are multiple prefixes then use separate
# flags for file inputs
self.name = None
# define file types by their suffix instead
if self.file_format == "plink":
self.ped_file = [pf for pf in infiles if re.search(".ped",
pf)][0]
self.map_file = [mf for mf in infiles if re.search(".map",
mf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.ped_file
except AssertionError:
raise ValueError(".ped file is missing, please "
"specify")
try:
assert self.map_file
except AssertionError:
raise ValueError(".map file is missing, please "
"specify")
elif self.file_format == "plink_binary":
self.fam_file = [ff for ff in infiles if re.search(".fam",
ff)][0]
self.bim_file = [fb for fb in infiles if re.search(".bim",
fb)][0]
self.bed_file = [bf for bf in infiles if re.search(".bed",
bf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.fam_file
except AssertionError:
raise ValueError(".fam file is missing, please "
"specify")
try:
assert self.bim_file
except AssertionError:
raise ValueError(".bim file is missing, please "
"specify")
try:
assert self.bed_file
except AssertionError:
raise ValueError(".bed file is missing, please "
"specify")
elif self.file_format == "oxford":
self.gen_file = [gf for gf in infiles if re.search(".gen",
gf)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.gen_file
except AssertionError:
raise ValueError(".gen file missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file missing, please "
"specify")
elif self.file_format == "oxford_binary":
self.bgen_file = [bg for bg in infiles if re.search(".bgen",
bg)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bgen_file
except AssertionError:
raise ValueError(".bgen file is missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file is missing, please "
"specify")
elif self.file_format == "vcf":
self.vcf_file = [vf for vf in infiles if re.search(".vcf",
vf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.vcf_file
except AssertionError:
raise ValueError(".vcf file is missing, please "
"specify")
elif self.file_format == "bcf":
self.bcf_file = [bv for bv in infiles if re.search(".bcf",
bv)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bcf_file
except AssertionError:
raise ValueError(".bcf file is missing, please "
"specify")
elif self.file_format == "GRM_binary":
self.id_file = [ig for ig in infiles if re.search(".grm.id",
ig)][0]
self.n_file = [gn for gn in infiles if re.search(".grm.N.bin",
gn)][0]
self.bin_file = [gb for gb in infiles if re.search(".grm.bin",
gb)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.n_file
except AssertionError:
raise ValueError("grm.N file is missing, please "
"specify")
try:
assert self.bin_file
except AssertionError:
                raise ValueError("GRM genotype file is missing, please "
                                 "specify")
elif self.file_format == "GRM_plink":
self.id_file = [ig for ig in infiles if re.search(".rel.id",
ig)][0]
self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin",
gn)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.rel_file
except AssertionError:
raise ValueError("rel.N file is missing, please "
"specify")
def set_phenotype(self, pheno_file=None, pheno=1):
'''
Set the phenotype for a set of individuals
using an external phenotypes file.
Default is to use the (n+2)th column, designated
as pheno 1.
'''
if type(pheno) == int:
pheno = str(pheno)
elif type(pheno) == str:
pass
else:
raise AttributeError("Type of pheno unknown. "
"Must be str or int.")
self.pheno_file = pheno_file
self.pheno = pheno
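# Illustrative sketch only (hypothetical file names): group a binary plink
# trio of files and attach an external phenotype file.
def _example_filegroup():
    geno = FileGroup(files=["study.bed", "study.bim", "study.fam"],
                     file_format="plink_binary")
    geno.set_phenotype(pheno_file="phenotypes.txt", pheno=1)
    return geno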
class GWASProgram(object):
'''
A base level object for programs designed to perform genome-wide
association analysis and operate on genome-wide genotyping data.
[INSERT PROPER DOCSTRING - see style guide]
'''
def __init__(self, executable=None, required_format=None):
self.executable = executable
self.require_format = required_format
def program_call(self, infiles, outfile):
'''build a statement to perform genome-wide
analysis using infiles
'''
return ""
def postprocess(self, infiles, outfile):
'''collect and process output files from
program - format for Result class objects'''
return ""
def build(self, infiles, outfile):
'''run analysis program'''
        cmd_program = self.program_call(infiles, outfile)
cmd_postprocess = self.postprocess(infiles, outfile)
if cmd_postprocess:
            assert cmd_postprocess.strip().endswith(";")
else:
pass
statement = " checkpoint; ".join((cmd_program,
cmd_postprocess))
return statement
class GCTA(GWASProgram):
'''
GCTA is designed for computing genetic relationship matrices, linear
mixed model analyses and phenotype estimation/prediction.
It can also perform SNP-wise GWAS.
Files MUST be in Plink binary format
'''
def __init__(self, files, options=None, settings=None,
design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "gcta64"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
'''build GCTA call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
raise AttributeError("Files must be in binary plink format "
"or as a GRM to use GCTA. Please "
"convert and try again.")
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
self.statement["program"] = " ".join(statement)
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_binary" or file_format == "GRM_plink":
statement = " --grm %s " % infiles.name
else:
raise AttributeError("file format is not defined or recognised."
"Please define the input corectly when "
"instantiating a FileGroup object")
return statement
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM and output the first
        n principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def apply_filters(self, filter_type, filter_value):
'''
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome_number - for non-human species, the number of chromosomes to
be considered autosomes
* exclude_snps - text file list of variant IDs to exclude from analysis
[file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
'''
if filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "autosome_number":
self._construct_filters(autosome_number=filter_value)
elif filter_type == "exclude_snps":
self._construct_filters(exclude_snps=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
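    # Example (illustrative) of applying common filters one at a time, e.g.:
    #   gcta = GCTA(files)
    #   gcta.apply_filters("min_allele_frequency", "0.01")
    #   gcta.apply_filters("chromosome", "1")
    #   gcta.apply_filters("keep", "keep_samples.txt")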
def _construct_filters(self, **kwargs):
'''
Add filter to each GCTA run.
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome - exclude all non-place and non-autosomal variants.
[boolean]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
* update_gender - provide gender information in a separate text
file. [file]
* grm_threshold - remove one of a pair of individuals with
estimated relatedness greater than this value.
* ld_significance - p-value threshold for regression test
of LD significance
* genotype_call - GenCall score cut-off for calling raw
genotypes into Plink PED format
* meta_pval - p-value threshold cut-off for conditional
and joint genome-wide analysis
* cojo_window - distance in kb beyond which SNPs are assumed
to be in linkage equilibrium
* cojo_collinear - multiple regression R^2 on selected SNPs
value above which the testing SNP will not be selected.
* cojo_inflation - adjust COJO analysis test statistics
for genomic control. [boolean]
* reml_iterations - maximum number of iterations to use
during reml analysis. Default is 100. [int]
'''
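# Worked example (sketch): keyword arguments are looked up in filter_map
# below and %-formatted with their values, so
#   self._construct_filters(min_allele_frequency="0.01", chromosome="1")
# appends " --maf 0.01 " and " --chr 1 " to self.statement["filters"].
# Boolean values (e.g. cojo_inflation=True) contribute the bare flag only.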
statement = []
# map of keyword arguments recognised to GCTA filtering flags
filter_map = {"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"autosome": " --autosome ",
"autosome_number": " --autosome-num %s ",
"grm_threshold": " --grm-cutoff %s ",
"ld_significance": " --ls-sig %s ",
"genotype_call": " --gencall %s ",
"meta_pval": " --cojo-p %s ",
"cojo_window": " --cojo-wind %s ",
"cojo_collinear": " --cojo-collinear %s ",
"cojo_inflation": " --cojo-gc ",
"reml_iterations": " --reml-maxit %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def mixed_model(self, lmm_method, grm=None, qcovar=None,
dcovar=None):
'''
Run a linear mixed model with the GRM used to model the
random effects of the estimated genetic relationship
between individuals
'''
# add the mlm flag to the statement
self._run_tasks(lmm=lmm_method)
# construct the rest of mlm statement
statement = []
if qcovar:
statement.append(" --qcovar %s " % qcovar)
else:
pass
if dcovar:
statement.append(" --covar %s " % dcovar)
else:
pass
if grm:
statement.append(" --grm %s " % grm)
else:
E.warn("No GRM has been provided for the mixed model")
self.statement["mlm"] = " ".join(statement)
def reml_analysis(self, method, parameters, prevalence=None,
qcovariates=None, discrete_covar=None):
'''
Use REML to estimate the proportion of phenotypic variance
explained by the estimated genetic relationship between
individuals.
Arguments
---------
method: string
GCTA method to use for REML estimation of h2. Includes:
* snpBLUP - calculate the SNP BLUPs from the genotype
data and the estimated total genetic value/ breeding value
* fixed_cor -
* priors - provide initial priors for the variance components
estimation
* unconstrained - allow variance estimates to fall outside
of the normal parameter space, bounded [0, ).
* GxE - estimate the contribution of GxE with covariates
to the phenotype variance
* BLUP_EBV - output individual total genetic effect/breeding
values
'''
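# Illustrative call (a sketch; file names are placeholders and `gcta` is
# assumed to be a GCTA instance already set up with a GRM):
#   gcta.reml_analysis(method="standard", parameters=None,
#                      prevalence=0.01,
#                      qcovariates="quantitative_covars.txt",
#                      discrete_covar="discrete_covars.txt")
# This adds " --reml " through _run_tasks and collects the prevalence and
# covariate flags under self.statement["reml"].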
statement = []
try:
params = parameters.split(",")
if len(params) == 1:
params = params[0]
else:
pass
except AttributeError:
params = parameters
self._run_tasks(parameter=params,
greml=method)
if prevalence:
statement.append(" --prevalence %0.3f " % prevalence)
else:
pass
if qcovariates:
statement.append(" --qcovar %s " % qcovariates)
else:
pass
if discrete_covar:
statement.append(" --covar %s " % discrete_covar)
else:
pass
self.statement["reml"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
The principal functions of GCTA revolve around GRM estimation
and variance components analysis, such as REML estimation of
heritability and variance components, BLUP and phenotype prediction.
It can also be used to do PCA and conditional and joint GWAS.
Tasks
-----
* pca - perform principal components analysis on a GRM
* greml - perform restricted maximum likelihood analysis
for estimation of variance components
* estimate_ld - estimate the linkage disequilibrium structure
over the genomic regions specified
* simulate_gwas - simulate genome-wide association data based
on observed genotype data
* cojo - conditional and joint genome-wide association
analysis across SNPs and covariates
* bivariate_reml - perform GREML on two traits, either both
binary, both quantitative or one of each
* lmm - perform a linear mixed model based association analysis
'''
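# Example dispatch (sketch): each keyword names a task in task_map below,
# with `parameter` substituted where the flag takes a value:
#   self._run_tasks(pca="20")                      ->  " --pca 20 "
#   self._run_tasks(greml="standard")              ->  " --reml "
#   self._run_tasks(parameter="summary.ma", cojo="stepwise")
#                             ->  " --cojo-file summary.ma --cojo-slct "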
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
# put all of the other tasks as options in the calling function
task_map = {"pca": " --pca %s ",
"greml": {"standard": " --reml ",
"priors": " --reml --reml-priors %s ",
"reml_algorithm": " --reml --reml-alg %s ",
"unconstrained": " --reml --reml-no-constrain ",
"GxE": " --reml --gxe %s ",
"LRT": " --reml --reml-lrt %s ",
"BLUP_EBV": " --reml --reml-pred-rand ",
"snpBLUP": " --blup-snp %s "},
"estimate_ld": " --ld %s ",
"simulate_gwas": {"quantitative": " --simu-qt ",
"case_control": " --simu-cc %s %s "},
"cojo": {"stepwise": " --cojo-file %s --cojo-slct ",
"no_selection": " --cojo-file %s --cojo-joint ",
"snp_conditional": " --cojo-file %s --cojo-cond %s "},
"bivariate_reml": {"standard": " --reml-bivar %s ",
"no_residual": " --reml-bivar %s --reml-bivar-nocove ",
"fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "},
"lmm": {"standard": " --mlma ",
"loco": " --mlma-loco ",
"no_covar": " --mlma-no-adj-covar "},
"remove_relations": {"cutoff": " --grm-cutoff %s "}}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
# LD estimation is likewise not nested
elif task == "estimate_ld":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
raise IOError("no SNP file list detected")
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("% Task not recognised, see docs for details of "
"recognised tasks" % task)
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
self.statement["tasks"] = " ".join(statement)
def genetic_relationship_matrix(self, compression="binary", metric=None,
shape="square", options=None):
'''
Calculate the estimated genetic relationship matrix from
genotyping data
* estimate_grm - estimate the realized genetic relationship
matrix between individuals from genotyping data
'''
mapf = {"binary": " --make-grm-bin ",
"gzip": " --make-grm-gz ",
"no_compress": " --make-grm ",
"X_chr": " --make-grm-chr ",
"X_chr_gz": " --make-grm-gz ",
"inbreeding": " --ibc "}
if options == "X_chr":
if compression == "gz":
state = mapf["X_chr_gz"]
else:
state = mapf["X_chr"]
elif options == "inbreding":
state = mapf["inbreeding"]
else:
pass
# check compression is compatible
if compression == "gz":
state = mapf["gzip"]
elif compression == "bin":
state = mapf["binary"]
elif compression is None and not options:
state = mapf["no_compress"]
self.statement["matrix"] = state
def build_statement(self, infiles, outfile, threads=None,
memory=None, parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["mlm"])
except KeyError:
pass
try:
statement.append(self.statement["reml"])
except KeyError:
pass
if threads:
statement.append(" --thread-num %i " % threads)
else:
pass
# add output flag
statement.append(" --out %s " % outfile)
os.system(" ".join(statement))
class Plink2(GWASProgram):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Requires Plink v1.9 to be in the user's PATH variable as ``plink2`` to
distinguish it from Plink v1.07.
'''
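# Usage sketch (illustrative only; `geno_files` is assumed to be a
# FileGroup instance describing a plink_binary fileset, built elsewhere
# in this module):
#   plink = Plink2(files=geno_files)
#   plink.program_call(geno_files, "cohort_qc")
#   plink.apply_filters("min_allele_frequency", "0.01")
#   plink.apply_filters("genotype_rate", "0.05")
#   plink.build_statement(geno_files, "cohort_qc", threads=4)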
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plink2"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
''' build Plink call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
inputs = self._build_multiple_file_input(infiles,
infiles.file_format)
statement.append(inputs)
# check for the presence of an additional phenotypes file
try:
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
except AttributeError:
pass
self.statement["program"] = " ".join(statement)
def hamming_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using Hamming distance across all variants
'''
# check shape is compatible
if not shape:
shape = "triangle"
elif shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression)
self.statement["matrix"] = state
def ibs_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise similarity matrix between
individuals using proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genome_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using 1 - proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genetic_relationship_matrix(self, shape, compression, metric,
options=None):
'''
Calculate genomic pair-wise distance matrix between
individuals using proportion of IBS alleles
Requires the use of the Plink2 parallelisation to run with large
cohorts of patients
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if metric in ["cov", "ibc2", "ibc3"]:
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression, options=metric)
else:
E.info("%s metric not recognised. Running with default Fhat1" % metric)
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression)
self.statement["matrix"] = state
def apply_filters(self, filter_type, filter_value):
'''
Apply a filter to the Plink2 run. The recognised filter types are:
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* exclude_snp - exclude this single variant
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
* autosome - exclude all non-placed and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
'''
if filter_type == "genotype_rate":
self._construct_filters(genotype_rate=filter_value)
elif filter_type == "hwe":
self._construct_filters(hwe=filter_value)
elif filter_type == "missingness":
self._construct_filters(missingness=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "exclude_snp":
self._construct_filters(exclude_snp=filter_value)
elif filter_type == "exclude":
self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "exclude_chromosome":
self._construct_filters(exclude_chromosome=filter_value)
elif filter_type == "autosome":
self._construct_filters(autosome=filter_value)
elif filter_type == "pseudo_autosome":
self._construct_filters(pseudo_autosome=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
elif filter_type == "snp_bp_range":
self._construct_filters(snp_bp_range=filter_value)
elif filter_type == "conditional_snp":
self._construct_filters(conditional_snp=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
def _build_multiple_file_input(self, infiles, file_format):
'''
internal function only. Use it to construct
the appropriate file input flags
'''
statement = None
if file_format == "oxford":
statement = " --gen %s --sample %s " % (infiles.gen_file,
infiles.sample_file)
elif file_format == "oxford_binary":
statement = " --bgen %s --sample %s " % (infiles.bgen_file,
infiles.sample_file)
elif file_format == "plink":
statement = " --ped %s --map %s " % (infiles.ped_file,
infiles.sample_file)
elif file_format == "plink_binary":
statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
infiles.bim_file,
infiles.fam_file)
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.vcf_file
elif file_format == "bcf":
statement = " --bcf %s " % infiles.vcf_file
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
else:
raise AttributeError("file format is not defined. Please "
"define the input file formats when "
"instantiating a FileGroup object")
return statement
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_plink":
statement = " --grm.bin %s " % infiles.name
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.name
else:
raise AttributeError("file format is not defined or recognised."
"Please define the input corectly when "
"instantiating a FileGroup object")
return statement
def _construct_filters(self, **kwargs):
'''
Add filter to each plink run. [data type]
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* missingness - exclude individuals with total genotype missingness
above this value. [float]
* hwe - p-value threshold for excluding SNPs deviating from
Hardy-Weinberg expectations. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* mendelian_error - filter out samples/trios exceeding the error
threshold. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* quality_score_file - vcf file with variants and quality scores. Use
`qual_score_column` and `var_id_col` to specify which columns
correspond to the quality score and variant ID columns.
[file] <int> <int>
* min_qual_score - alters the lower bound of the quality score
threshold; default is 0.[int]
* max_qual_score - sets an upper limit on the quality scores;
default is Inf. [int]
* allow_no_sex - prevents phenotypes set to missing if there is no
gender information. [boolean]
* enforce_sex - force phenotype missing when using --make-bed, --recode
or --write-covar. [boolean]
* subset_filter - filter on a particular subset. Choices are: cases,
controls, males, females, founders, nonfounders. [str]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
* autosome - exclude all non-placed and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
* specific_snp - only load the variant specified. [str]
* exclude_snp - exclude this single variant
* window_size - alters behaviour of `specific_snp` and `exclude_snp`
to include/exclude SNPs within +/- half of this distance (kb) are
also included. [float]
* range_resolution - sets the resolution of the (from, to) range.
Either bp, kb or mb. If set it will take the values from
`snp_bp_range`. [str/int/float]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
'''
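# Worked example (sketch): quality-score filtering needs three keywords
# supplied together, which are combined below before the generic loop:
#   self._construct_filters(quality_score_file="scores.qual",
#                           qual_score_column="2", var_id_col="1",
#                           min_allele_frequency="0.01")
# yields the " --qual-scores " flag with its column arguments plus
# " --maf 0.01 ", all appended to self.statement["filters"].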
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"genotype_rate": " --geno %s ",
"missingness": "--mind %s ",
"hwe": " --hwe %s ",
"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"mendelian_error": " --me %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"quality_score_file": " --qual-scores %s ",
"qual_score_column": " %s ",
"var_id_col": " %s ",
"min_qual_score": " --qual-threshold %s ",
"max_qual_score": " --qual-max-threshold %s ",
"allow_no_sex": " --allow-no-sex ",
"enforce_sex": " --must-have-sex ",
"subset_filter": " --filter-%s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"exclude_chromosome": " --not-chr %s ",
"autosome": " --autosome ",
"pseudo_autosome": " --autosome-xy ",
"ignore_indels": " --snps-only no-DI ",
"snp_id_range": " --from %s --to %s ",
"specific_snp": " --snp %s ",
"window_size": " --window %s ",
"exclude_snp": " --exclude-snp %s ",
"snp_bp_range": "--from-bp %s --to-bp %s ",
"covariates_file": " --filter %s ",
"covariate_filter": " %s ",
"covariate_column": " --mfilter %s ",
"missing_phenotype": " --prune ",
"conditional_snp": " --condition %s ",
"haplotype_size": " --blocks-max-kb %s ",
"haplotype_frequency": " --blocks-min-maf %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
# need to check for covariates and qual scores - these
# are more complex. Deal with these first and remove
# from dictionary once complete.
try:
assert filter_dict["quality_score_file"]
assert filter_dict["qual_score_column"]
assert filter_dict["var_id_col"]
quals = []
qual_file = filter_dict["quality_score_file"]
score_col = filter_dict["qual_score_column"]
id_col = filter_dict["var_id_col"]
quals.append(filter_map["quality_score_file"] % qual_file)
quals.append(filter_map["qual_score_column"] % score_col)
quals.append(filter_map["var_id_col"] % id_col)
# remove from dictionary so they are not processed again below
filter_dict.pop("quality_score_file", None)
filter_dict.pop("qual_score_column", None)
filter_dict.pop("var_id_col", None)
filters.append(" ".join(quals))
except KeyError:
pass
try:
assert filter_dict["covariates_file"]
assert filter_dict["covariate_filter"]
covars = []
covar_file = filter_dict["covariates_file"]
covar_val = filter_dict["covariate_filter"]
covars.append(filter_map["covariates_file"] % covar_file)
covars.append(filter_map["covariate_filter"] % covar_val)
# check whether to filter on a specific column number; the default is
# the 3rd file column, i.e. the (n+2)th column
try:
assert filter_dict["covariate_column"]
covar_col = filter_dict["covariate_column"]
covars.append(filter_map["covariate_column"] % covar_col)
filter_dict.pop("covariate_column", None)
except KeyError:
pass
# remove from dictionary
filter_dict.pop("covariates_file", None)
filter_dict.pop("covariate_filter", None)
filters.append(" ".join(covars))
except KeyError:
pass
# range_resolution and snp_bp_range are used together
try:
assert filter_dict["snp_bp_range"]
flags = filter_map["snp_bp_range"]
from_pos = filter_dict["snp_bp_range"].split(",")[0]
to_pos = filter_dict["snp_bp_range"].split(",")[1]
filters.append(flags % (from_pos, to_pos))
# remove so they are not duplicated - source of bugs
filter_dict.pop("snp_bp_range", None)
except KeyError:
pass
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
# handle multiple arguments in string format
elif len(filter_dict[each].split(",")) > 1:
vals = tuple(filter_dict[each].split(","))
filters.append(filter_map[each] % vals)
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def calc_ld(self, ld_statistic, ld_threshold,
ld_shape="table"):
'''
Calculate linkage disequilibrium between all SNP
pairs.
Arguments
---------
ld_statistic: string
The LD statistic to report, either correlation or squared correlation
of inter-variant allele counts
ld_threshold: float
minimum value to report for pair-wise LD
ld_window: int
max distance (in Kb) between SNPs for calculating LD
ld_shape: string
shape to use for reporting LD, either a table or a matrix. If a
matrix then either square, square with diagnonal (square0) or
triangular. Square matrices are symmetric.
'''
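# Illustrative call (sketch):
#   plink.calc_ld(ld_statistic="r2", ld_threshold=0.2, ld_shape="table")
# appends "--r2 inter-chr gz dprime " and " --ld-window-r2 0.200 " to the
# "tasks" component of the statement.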
statement = []
ld_map = {"r": " --r %s dprime ",
"r2": "--r2 %s dprime "}
shape_map = {"table": "inter-chr gz",
"square": "square gz",
"square0": "square0 gz",
"triangle": "triangle gz"}
try:
statement.append(ld_map[ld_statistic] % shape_map[ld_shape])
except KeyError:
raise ValueError("%s LD statistic not recognised. Please "
"use eithr 'r' or 'r2'" % ld_statistic)
if type(ld_threshold) == float:
statement.append(" --ld-window-r2 %0.3f " % ld_threshold)
else:
E.warn("threshold type not recognised, setting to default "
"value of 0.2")
self.statement["tasks"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
Plink2 is capable of much more than just running basic association
analyses.
These include file processing, reformatting, filtering, data summaries,
PCA, clustering, GRM calculation (slow and memory intensive), etc.
Multiple tasks can be added by separate calls to this function.
For instance, adding phenotype and gender information using the
update_samples task whilst changing the file format.
Tasks
-----
* change_format - convert from input format to an alternative format
after applying filters.
* change_missing_values - alters the genotype or phenotype missing
value into the value supplied.
* update_variants - use this to fill in missing variant IDs, useful
for data from exome or whole-genome sequencing that have
non-standard IDs.
* update_samples - update phenotype and sample information
* flip_strands - flip the strand for alleles, swaps A for T and
C for G.
* flip_scan - use the LD-based scan to check SNPs have not had
incorrect strand assignment. Particularly useful if cases and
controls were genotyped separately, or the cohort was genotyped
in different batches.
* sort - sort files by individual and/or family IDs
* merge - merge new filesets with reference fileset.
* merge_mode - handling of missing values and overwriting values
* find_duplicates - find and output duplicate variants based on bp position,
or variant ID. Useful to output for the --exclude filtering flag.
* remove_relations - remove one of a pair of individuals with IBS >=
a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125).
* check_gender - check imputed gender from non-pseudoautosomal X
chromosome genotypes against self-reported gender
* estimate_haplotypes - assign SNPs to haplotype blocks and get
positional information
'''
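# Example dispatch (sketch): each keyword names a task in task_map below,
# with `parameter` substituted where the flag takes a value:
#   self._run_tasks(change_format="plink_binary")   ->  " --make-bed "
#   self._run_tasks(parameter="new_ids.txt", update_samples="sample_ids")
#                                      ->  " --update-ids new_ids.txt "
# Successive calls are concatenated onto self.statement["tasks"].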
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
task_map = {'change_format': {"plink_binary": " --make-bed ",
"plink": " --recode ",
"oxford": " --recode oxford ",
"oxford_binary": " --recode oxford gen-gz ",
"raw": " --recode A tabx "},
"change_missing_values": {"genotype": " --missing-genotype %s ",
"phenotype": " --missing-phenotype %s "},
"update_variants": {"variant_ids": " --set-missing-var-ids %s ",
"missing_id": " --mising-var-code %s ",
"chromosome": " --update-chr %s ",
"centimorgan": " --update-cm %s ",
"name": " --update-name %s ",
"alleles": " --update-alleles %s ",
"map": " --update-map %s "},
"update_samples": {"sample_ids": " --update-ids %s ",
"parents": " --update-parents %s ",
"gender": " --update-sex %s %s "},
"flip_strands": {"all_samples": " --flip %s ",
"subset": " --flip-subset %s "},
"flip_scan": {"default": " --flip-scan verbose ",
"window": "--flip-scan --flip-scan-window %s ",
"kb": " --flip-scan --flip-scan-window-kb %s ",
"threshold": " --flip-scan --flip-scan-threshold %s "},
"sort": {"none": " --indiv-sort %s ",
"natural": " --indiv-sort %s ",
"ascii": " --indiv-sort %s ",
"file": " --indiv-sort %s "},
"merge": {"plink": " --merge %s ",
"binary_plink": " --bmerge %s "},
"merge_mode": {"default": " --merge-mode 1 ",
"orginal_missing": " --merge-mode 2 ",
"new_nonmissing": " --merge-mode 3 ",
"no_overwrite": " --merge-mode 4 ",
"force": " --merge-mode 5 ",
"report_all": " --merge-mode 6 ",
"report_nonmissing": " --merge-mode 7"},
"find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ",
"id_match": " --list-duplicate-vars ids-only ",
"suppress_first": " --list-duplicate-vars suppress-first"},
"remove_relations": {"cutoff": " --rel-cutoff %s "},
"check_gender": " --check-sex ",
"pca": " --pca %s ",
"estimate_haplotypes": " --blocks "}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
elif task == "check_gender":
statement.append(task_map[task])
elif task == "estimate_haplotypes":
statement.append(task_map[task])
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# gender has two string formats
if value == "gender":
gcol = 1
statement.append(sub_task[value] % (parameter,
gcol))
else:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("No sub task found, see docs for details of "
"recognised tasks")
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
# handle multiple tasks for a single run
try:
curr_tasks = self.statement["tasks"]
new_tasks = " ".join(statement)
self.statement["tasks"] = " ".join([curr_tasks, new_tasks])
except KeyError:
self.statement["tasks"] = " ".join(statement)
def _output_statistics(self, **kwargs):
'''
Summary statistics are written to specific files dictated by the
type of statistic
Statistics
----------
* allele_frequency - writes out MAF to `plink`.frq, this can be
modified with specific keywords.
* missing_data - generates a report of data missingness, can be subset
into within family and/or cluster reports
* hardy_weinberg - calculates all HWE p-values using exact test
statistics. For case/control studies reports are written for case,
controls and combined.
* mendel_errors - generates a Mendelian error report across all trios.
There are 10 different codes responding to different Mendelian error
scenarios.
* inbreeding - calculate observed and expected homozygosity across
individuals and F statistics. If the sample size is small then a
file of MAFs is required. Inbreeding coefficients can also be
reported on request using inbreeding_coef.
* gender_checker - checks gender assignment against X chromosome
genotypes. Gender values can also be imputed based on genotype
information using gender_impute.
* wrights_fst - calculate Wright's Fst statistic given a set of
subpopulations for each autosomal diploid variant. Used in
conjunction with the --within flag.
'''
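# Illustrative call (sketch): a truthy keyword value is substituted into
# the flag, while a falsy value emits the bare flag with the placeholder
# stripped:
#   self._output_statistics(allele_frequency="gz", missing_data="")
# adds " --freq gz " and the bare " --missing" flag to the statement.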
stats_map = {"allele_frequency": " --freq %s ",
"missing_data": " --missing %s ",
"hardy_weinberg": " --hardy midp ",
"mendel_errors": " --mendel %s ",
"inbreeding": " --het %s ",
"inbreeding_coef": " --ibc ",
"gender_checker": " --check-sex ",
"gender_impute": " --impute-sex ",
"wrights_fst": " --fst --within %s ",
"case_control_fst": "--fst %s "}
statement = []
for key, value in kwargs.items():
if value:
try:
assert stats_map[key]
statement.append(stats_map[key] % value)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
else:
try:
assert stats_map[key]
flag = stats_map[key].rstrip("%s ")
statement.append(flag)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
self.statement["stats"] = " ".join(statement)
def run_association(self, association=None, model=None,
run_options=None,
permutation=False, n_perms=None,
random_seed=None, permutation_options=None,
covariates_file=None, covariates=None):
'''
Construct a statement for a plink2 association analysis.
QC filters are constructed from input during instantiation.
run options include redirecting logging output, using parallelisation,
defining number of threads to use, etc
The default association uses the --assoc flag. Plink will check
phenotype coding, if it is not case/control it assumes
it is a continuous trait and uses linear regression.
Alternative regression models that include covariates can be used,
i.e. logistic and linear regression.
key
***
{CC} - applies to case/control analysis only
{quant} - applies to quantitative trait only
{CC/quant} - applies to both
run_options
-----------
``--assoc``:
* `fisher | fisher-midp` - uses Fisher's exact test to calculate
association p-values or applies Lancaster's mid-p adjustment. {CC}
* `counts` - causes --assoc to report allele counts instead of
frequencies. {CC}
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `qt-means` - generates a .qassoc.means file reporting trait means
and standard deviations by genotype. {quant}
* `lin` - reports the Lin et al (2006) statistic to be reported. If
multiple testing adjustments and/or permutation is also used, they
will be based on this statistic. {quant}
``--model``:
* `fisher | fisher-midp | trend-only` - uses Fisher's exact test
to calculate association p-values or applies Lancaster's mid-p
adjustment. trend-only forces only a trend test to be performed.
{CC}
* `dom | rec | gen | trend` - use the specified test as the basis
for the model permutation. If none are defined the result with the
smallest p-value is reported. {CC}
* --cell - sets the minimum number of observations per cell in the
2x3 contingency table. The default is 0 with the Fisher and
Fisher-midp test, otherwise 5. {CC}
``--linear/logistic``:
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `hide-covar` - removes the covariate specific sections from the
results output. {CC/quant}
* `sex | no-x-sex` - `sex` adds sex as covariate to all models,
whilst `no-x-sex` does not include gender in X-chromosome SNP
models. {CC/quant}
* `interaction` - adds in genotype X covariate interaction terms
into the model. Can only be used with permutation if ``--tests``
is also specified. {CC/quant}
* `beta` - reports the beta coefficients instead of the OR in a
logistic model. {CC}
* `standard-beta` - standardizes the phenotype and all predictor
variables to zero mean and unit variance prior to regression
(separate for each variant analysed). {quant}
* `intercept` - includes the intercept in the output results.
{quant}
model
-----
* `recessive` - `recessive` specifies the model assuming the A1 allele
as recessive. {CC/quant}
* `dominant` - `dominant` specifies the model assuming the A1 allele is
dominant. {CC/quant}
* `genotype` - `genotype` adds an additive effect/dominance deviation
2df joint test with two genotype variables in the test (coded 0/1/2
and 0/1/0). {CC/quant}
* `trend` - forces a trend test to be performed. {CC/quant}
* `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype
coding. With permutation it will be based on the joint test instead
of just the additive effects. This can be overridden using the
`--tests` flag. {CC/quant}
* `no-snp` - `no-snp` defines a regression of phenotype on covariates
without reference to genotype data, except where `--condition{-list}`
is specified. If used with permutation, test results will be reported
for every covariate. {CC/quant}
permutation
-----------
If permutation is True, run an adaptive Monte Carlo permutation test.
If n_perms is set, this will run a max(T) permutation test with the n
replications. A random seed will need to be provided.
* `perm-count` - this alters the permutation output report to include
counts instead of frequencies
covariates
----------
These should be provided in a separate file. Specifying which
covariates to include can be done as either a comma-separated list
of covariate names or numbers. These numbers will correspond to the
(n+2)th covariate file column as per the plink documentation.
'''
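# Usage sketch (illustrative; file names and covariate labels are
# placeholders):
#   plink.run_association(association="logistic", model="dominant",
#                         run_options=["hide-covar", "sex"],
#                         permutation=True, n_perms=10000,
#                         random_seed=42,
#                         covariates_file="covars.txt",
#                         covariates="age,PC1,PC2")
# This assembles a " --logistic dominant ... " statement with permutation
# (mperm=10000, seed 42) and the named covariates under
# self.statement["assoc"].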
# model map maps common option effects onto specific syntax
model_map = {"--logistic": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--linear": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--model": {"recessive": "rec",
"dominant": "dom",
"genotype": "gen"}}
statement = []
# construct analysis flags
# add model, i.e. additive, recessive, dominant, etc.
# see docstring for details. Make sure correct modifier is used
# with a mapping dictionary
if association == "logistic":
statement.append(" --logistic ")
m_map = model_map["--logistic"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "linear":
statement.append(" --linear ")
m_map = model_map["--linear"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "model":
statement.append(" --model ")
m_map = model_map["--model"]
statement.append(m_map[model])
else:
statement.append(" --assoc ")
# add in run options. These need to be in their correct
# format already
if run_options:
modifiers = " ".join(run_options)
statement.append(modifiers)
else:
pass
# permutation should have a random seed set by the user. Allow
# this to set it's own seed if one not provided, but report it in
# the log file
if permutation:
try:
assert random_seed
except AssertionError:
random_seed = random.randint(0, 100000000)
E.warn("No seed is provided for the permutation test. "
"Setting seed to %s. Record this for future "
"replicability" % random_seed)
if n_perms:
statement.append(" mperm=%i --seed %s " % (n_perms,
random_seed))
else:
statement.append(" perm --seed %s " % (random_seed))
else:
pass
# if using linear or logistic, covariates can be added into the model
# to adjust for their effects - assumes fixed effects of covariates
# mixed models are not yet implemented in Plink2.
if covariates:
covars = covariates.split(",")
if len(covars) > 1:
if covars[0].isdigit():
m_covar = " --covar-number %s " % covariates
elif covars[0]:
m_covar = " --covar-name %s " % covariates
else:
# if none are specified then don't adjust the model for any
# and log a warning
E.warn("Covariate header or numbers are not recognised."
"No covariates will be included in the model. Please"
"specifiy them exactly")
covariates = None
covariates_file = None
elif len(covars) == 1:
if covars[0].isdigit():
m_covar = " --covar-number %s " % covariates
elif covars[0]:
m_covar = " --covar-name %s " % covariates
else:
# if none are specified then don't adjust the model for any
# and log a warning
E.warn("Covariate header or numbers are not recognised."
"No covariates will be included in the model. Please"
"specifiy them exactly")
covariates = None
covariates_file = None
if covariates and covariates_file:
statement.append(" --covar %s %s " % (covariates_file,
m_covar))
elif covariates and not covariates_file:
E.warn("No covariate file specified. None included in model.")
elif covariates_file and not covariates:
E.warn("No covariates specified to include in the model."
"None included")
else:
pass
self.statement["assoc"] = " ".join(statement)
def PCA(self, n_pcs="20"):
'''
Perform PCA analysis on a previously generated GRM, and output the first n
principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def _dimension_reduction(self, **kwargs):
'''
Use PCA to perform dimensionality reduction on
input samples. A PCA can be calculated using
a subset of samples which can then be projected on
to other samples.
'''
# FINISH ME!!!!
def _detect_interactions(self, method=None, modifier=None,
set_file=None, set_mode=None,
report_threshold=None,
sig_threshold=None,
covariates_file=None, covariates=None):
'''
Detect epistatic interactions between SNPs using either an inaccurate
scan (fast-epistasis) or a fully saturated linear model
Methods
-------
fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype
count tables to test for interactions. Can be modified to use a likelihood
ratio test `boost` or a joint-effects test `joint-effects`. Default is
`joint-effects`.
epistasis - uses a linear model to test for interactions between additive
effects after main effects. Logistic regression for case/control and
linear regression for quantitative traits.
two_locus - tests a single interaction between two variants using joint genotype
counts and frequencies.
adjusted - allows adjustment for covariates in the interaction test, and also adjusts
for main effects from both the test and target SNP. Requires an R plugin script.
'''
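# Illustrative call (sketch):
#   plink._detect_interactions(method="fast_epistasis",
#                              modifier="joint-effects",
#                              report_threshold=0.01,
#                              sig_threshold=0.001)
# adds " --fast-epistasis joint-effects " plus the --epi1/--epi2
# reporting thresholds to self.statement["epistasis"].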
interact_map = {"fast_epistasis": " --fast-epistasis %s ",
"epistasis": " --epistasis %s ",
"two_locus": " --twolocus %s ",
"adjusted": " --R %s "}
statement = []
if modifier:
statement.append(interact_map[method] % modifier)
else:
modifier = ""
statement.append(interact_map[method] % modifier)
if covariates_file:
statement.append("--covar %s --covar-name %s " % (covariates_file,
covariates))
else:
pass
if set_mode and set_file:
# does not work with two-locus test
if method == "two_locus" and set_mode:
E.warn("Two locus test cannot be used in conjunction "
"with a set-based test.")
elif set_mode:
statement.append(" %s --set %s " % (set_mode, set_file))
else:
pass
else:
pass
# alter reporting of significant interactions and significance
# level of interactions
if report_threshold:
statement.append(" --epi1 %0.3f " % float(report_threshold))
else:
pass
if sig_threshold:
statement.append(" --epi2 %0.3f " % float(sig_threshold))
else:
pass
self.statement["epistasis"] = " ".join(statement)
def _matrices(self, matrix_type, shape="triangle", compression=None, options=None):
'''
Calculate a number of different distance matrices:
realised genetic relationship matrix
relationship covariance matrix
identity by descent/state matrix
hamming distance matrix
* matrix_type - matrix to compute. Can be either IBS, 1 - IBS,
Hamming, GRM
'''
statement = []
if matrix_type == "hamming":
flag = " --distance "
elif matrix_type == "ibs":
flag = " --distance ibs "
elif matrix_type == "genomic":
flag = " --distance 1-ibs "
elif matrix_type == "grm":
flag = " --make-grm-bin "
if options:
statement.append(" ".join([flag, shape, compression, options]))
elif matrix_type == "grm":
statement.append(flag)
else:
statement.append(" ".join([flag, shape, compression]))
return " ".join(statement)
def _qc_methods(self, parameter=None, **kwargs):
'''
Perform QC on genotyping data, SNP-wise and sample-wise.
All arguments are passed as key word arguments, except
cases detailed in `Parameters` where they are passed with
the ``parameter`` argument.
Methods
-------
* ld_prune - generate a list of SNPs in linkage equilibrium by
pruning SNPs on either an LD statistic threshold, i.e. r^2,
or use a variance inflation factor (VIF) threshold
* heterozygosity - calculate average heterozygosity from each
individual across a set of SNPs, threshold on individuals
with deviation from expected proportions
* ibd - calculate the genetic relationship of individuals to
infer relatedness between individuals, threshold on given
degree of relatedness, e.g. IBD > 0.03125, 3rd cousins
* genetic_gender - estimate the gender of an individual
from the X chromosome genotypes - correlate with reported
gender and output discrepancies
* ethnicity_pca - perform PCA using a subset of independent
SNPs to infer genetic ancestry. Compare and contrast this
to individuals reported ancestry. Report discrepancies
and individuals greater than a threshold distance away
from a reference population.
* homozygosity - identifies sets of runs of homozygosity
within individuals. These may be indicative of inbreeding,
systematic genotyping errors or regions under selection.
Parameters
----------
Method parameters can also be passed through this function
as keyword=value pairs.
* ld_prune:
`kb` - this modifier changes the window resolution to kb
rather than bp.
`r2` - the r^2 threshold above which SNPs are to be removed
`vif` - the VIF threshold over which SNPs will be removed
`window` - window size to calculate pair-wise LD over
`step` - step size to advance window by
'''
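# Example (sketch): LD pruning takes its window, step and threshold from
# separate keywords, which are consumed before the generic task loop:
#   self._qc_methods(ld_prune="R2", window="50", step="5",
#                    threshold="0.2", kb=True)
# appends " --indep-pairwise 50kb 5 0.2 " to self.statement["QC"].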
qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ",
"VIF": " --indep %s %s %s "},
"heterozygosity": {"gz": " --het gz",
"raw": " --het "},
"ibd": {"relatives": " --genome gz rel-check ",
"full": " --genome gz full ",
"norm": " --genome gz "},
"genetic_gender": "none",
"ethnicity_pca": "none",
"homozygosity": {"min_snp": " --homozyg-snp %s ",
"min_kb": " --homozyg-kb %s ",
"default": " --homozyg ",
"density": " --homozyg-density ",
"set_gap": " --homozyg-gap ",
"snp_window": " --homozyg-window-snp %s ",
"het_max": " --homozyg-het %s "}}
task_dict = {}
state = []
# put everything in an accessible dictionary first
for task, value in kwargs.items():
task_dict[task] = value
# LD pruning can be passed multiple parameters,
# handle this separately
try:
sub_task = task_dict["ld_prune"]
ld_prune_task = qc_dict["ld_prune"]
try:
step = task_dict["step"]
except KeyError:
raise AttributeError("No step size found, please "
"pass a step size to advance the "
"window by")
try:
window = task_dict["window"]
try:
task_dict["kb"]
window = "".join([window, "kb"])
task_dict.pop("kb", None)
except KeyError:
pass
except KeyError:
raise AttributeError("No window size found. Please input "
"a window size to prune over")
try:
threshold = task_dict["threshold"]
except KeyError:
raise AttributeError("No threshold value, please input "
"a value to LD prune SNPs on")
# add in the kb if it is passed as an argument
state.append(ld_prune_task[sub_task] % (window, step, threshold))
task_dict.pop("threshold", None)
task_dict.pop("ld_prune", None)
task_dict.pop("window", None)
task_dict.pop("step", None)
except KeyError:
pass
for task, value in task_dict.items():
try:
sub_task = qc_dict[task]
try:
state.append(sub_task[value] % parameter)
except TypeError:
state.append(sub_task[value])
except KeyError:
raise AttributeError("Task not found, please see "
"documentation for available features")
self.statement["QC"] = " ".join(state)
def build_statement(self, infiles, outfile, threads=None,
memory="60G", parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["QC"])
except KeyError:
pass
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["stats"])
except KeyError:
pass
try:
statement.append(self.statement["assoc"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["epistasis"])
except KeyError:
pass
if threads:
statement.append(" --threads %i " % threads)
else:
pass
if not memory:
pass
elif memory != "60G":
memory = int(memory.strip("G")) * 1000
statement.append(" --memory %i " % memory)
else:
statement.append(" --memory 60000 ")
# add output flag
# outfile needs to be complete path for Plink to save
# results properly - check if it starts with '/',
# if so is already a full path
if not parallel:
if os.path.isabs(outfile):
statement.append(" --out %s " % outfile)
else:
outpath = "/".join([os.getcwd(), outfile])
statement.append(" --out %s " % outpath)
os.system(" ".join(statement))
else:
# parallelisation only really applies to GRM calculation
# at the moment <- need to generalise
# if parallelisation is used, invoke temp files
# then agglomerate files
statements = []
if os.path.isabs(outfile):
outpath = outfile
else:
outpath = "/".join([os.getcwd(), outfile])
for i in range(1, parallel+1):
# copy list, assigning just makes a pointer
p_state = statement[:]
p_state.append(" --parallel %i %i " % (i, parallel))
p_state.append(" --out %s.%i " % (outpath, i))
statements.append(" ".join(p_state))
os.system(";".join(statements))
class PlinkDev(Plink2):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Requires Plink v1.9_devel to be in the user's PATH variable as ``plinkdev`` to
distinguish it from Plink v1.07 and v1.9.
Currently uses Nov 11 development build.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plinkdev"
self.statement = {}
self.filters = []
class GWASResults(object):
'''
A class for handling the results from a GWA, used for plotting
and post-analysis QC
'''
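# Usage sketch (illustrative; the results file name is a placeholder):
#   results = GWASResults(assoc_file="cohort.assoc.logistic")
#   results.plotManhattan(save_path="cohort_manhattan.png",
#                         resolution="genome_wide")
#   results.plotQQ(save_path="cohort_qq.png")
#   results.getHits(threshold=5e-8)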
def __init__(self, assoc_file, **kwargs):
# if the assoc_file is a list of multiple files,
# then merge them into a single dataframe
if type(assoc_file) == list and len(assoc_file) > 1:
E.info("multiple results files detected")
self.infiles = assoc_file
self.infile = None
self.results = self.parse_genome_wide(assoc_file)
else:
E.info("single results file detected")
self.infile = assoc_file
self.infiles = None
# results is a pandas dataframe to operate on
self.results = self.get_results(assoc_file, **kwargs)
def parse_genome_wide(self, association_files):
'''
Accept a list of results files, merge them together
and output as a single dataframe
Will this take a lot of memory??
'''
file0 = association_files.pop(0)
df = self.get_results(file0)
for afile in association_files:
_df = self.get_results(afile)
df = df.append(_df)
df["CHR"] = df["CHR"].astype(np.int64)
df.sort_values(by=["CHR", "BP"], inplace=True)
return df
def get_results(self, association_file,
epistasis=False,
file_format="plink"):
'''
Parse a GWA or epistasis results file and return the table
'''
# use Pandas for now - try something different later
# SQLite DB maybe?
# inconsistent number of white spaces between
# fields means Pandas parsing breaks down
# fields need to be the correct data type,
# i.e. BP = int, P = float, SNP = str, etc
# if the file has already been parsed and processed
# just assign it instead
# epistasis results don't have a header
try:
peek = pd.read_table(association_file, nrows=5,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
peek = pd.read_table(association_file, nrows=5,
sep="\t", header=0,
index_col=None)
if epistasis:
try:
results_frame = pd.read_table(association_file,
sep="\s*", header=0,
index_col=None)
except StopIteration:
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None)
# results from fast epistasis are different to others
if file_format == "cassi_covar":
if results_frame.shape[1] == 12:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P"]
elif results_frame.shape[1] == 14:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ"]
elif results_frame.shape[1] == 16:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ",
"CASE_DPRIME" "CTRL_DPRIME"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
elif file_format == "cassi":
pass
elif file_format == "plink":
if results_frame.shape[1] == 7:
results_frame.columns = ["CHR1", "SNP1", "CHR",
"SNP", "OR", "STAT", "P"]
elif results_frame.shape[1] == 9:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "NMISS",
"OR", "SE", "STAT", "P"]
else:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR",
"SE", "STAT", "P"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"],
errors="coerce")
return results_frame
else:
try:
assert peek["log10P"].any()
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None,
dtype={"BP": np.int64,
"NMISS": np.int64})
return results_frame
except KeyError:
pass
l_count = 0
E.info("parsing file: %s" % association_file)
with open(association_file, "r") as ifile:
for line in ifile:
# check if spacing is whitespace or tab
if len(line.split(" ")) > 1:
parsed = line.split(" ")
elif len(line.split("\t")) > 1:
parsed = line.split("\t")
else:
raise IOError("file separator not recognised. "
"Must be whitespace or tab")
# remove multiple blank spaces
for i in range(parsed.count('')):
parsed.remove('')
# get rid of the newline
try:
parsed.remove('\n')
except ValueError:
parsed = [(px).rstrip("\n") for px in parsed]
if l_count == 0:
header = [iy.upper() for ix, iy in enumerate(parsed)]
head_idx = [ix for ix, iy in enumerate(parsed)]
map_dict = dict(zip(head_idx, header))
res_dict = dict(zip(header, [[] for each in header]))
l_count += 1
else:
col_idx = [lx for lx, ly in enumerate(parsed)]
col = [ly for lx, ly in enumerate(parsed)]
for i in col_idx:
res_dict[map_dict[i]].append(col[i])
l_count += 1
# subtract one from the index for the header column
df_idx = range(l_count-1)
results_frame = pd.DataFrame(res_dict, index=df_idx)
results_frame.fillna(value=1.0, inplace=True)
try:
results_frame = results_frame[results_frame["TEST"] == "ADD"]
except KeyError:
pass
# need to handle NA as strings
results_frame["P"][results_frame["P"] == "NA"] = 1.0
results_frame["BP"] = [int(bx) for bx in results_frame["BP"]]
results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]]
try:
results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0
results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]]
except KeyError:
try:
results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0
results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]]
except KeyError:
try:
results_frame["T"][results_frame["T"] == "NA"] = 1.0
results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]]
except KeyError:
pass
try:
results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0
results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]]
except KeyError:
pass
try:
results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0
results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]]
except KeyError:
pass
try:
results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0
results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]]
except KeyError:
pass
try:
results_frame["OR"][results_frame["OR"] == "NA"] = 1.0
results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]]
except KeyError:
try:
results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0
results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]]
except KeyError:
results_frame["B"][results_frame["B"] == "NA"] = 0.0
results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]]
return results_frame
def plotManhattan(self, save_path, resolution="chromosome",
write_merged=True, sig_level=8):
'''
Generate a basic manhattan plot of the association results
Just deal with chromosome-by-chromosome for now.
'''
# use the python ggplot plotting package
# need to calculate -log10P values separately
self.results["log10P"] = np.log10(self.results["P"])
# or using rpy2
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
R('''sink(file="sink.text")''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
if resolution == "chromosome":
R('''assoc.df$CHR <- factor(assoc.df$CHR, '''
'''levels=levels(ordered(unique(assoc.df$CHR))),'''
'''labels=unique(paste0("chr", assoc.df$CHR)))''')
R('''nchrom <- length(unique(assoc.df$CHR))''')
R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''')
R('''names(myCols) <- sort(unique(assoc.df$CHR))''')
R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''')
        R('''bp_indx <- seq_len(nrow(assoc.df))''')
R('''assoc.df$BPI <- bp_indx''')
R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + '''
'''geom_point(size=1) + colScale + '''
'''geom_hline(yintercept=6, linetype="dashed", colour="blue") + '''
'''theme_bw() + labs(x="Chromosome position (bp)", '''
'''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + '''
'''theme(axis.text.x = element_text(size=8))''')
R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path)
R('''print(p)''')
R('''dev.off()''')
elif resolution == "genome_wide":
R('''nchroms <- length(unique(assoc.df$CHR))''')
R('''png("%s", width=720, height=540)''' % save_path)
R('''p <- manhattan(assoc.df, main="Manhattan plot",'''
'''ylim=c(0, 50), cex=0.9, suggestiveline=T,'''
'''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), '''
'''col=c("#8B1A1A","#8470FF"))''')
R('''print(p)''')
R('''dev.off()''')
R('''sink(file=NULL)''')
if write_merged:
return self.results
else:
return False
def plotQQ(self, save_path, resolution="chromosome"):
'''
Generate a QQ-plot of expected vs. observed
test statistics
'''
self.results["log10P"] = np.log(self.results["P"])
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
R('''png("%s", width=720, height=540)''' % save_path)
R('''qq(assoc.df$P)''')
R('''dev.off()''')
def plotEpistasis(self, save_path, resolution="chromosome"):
'''
Generate both manhattan plot of the SNPs tested for
epistasis with their target SNP, and a QQplot
of the association test p-values
'''
# plot QQplot
qq_save = "_".join([save_path, "qqplot.png"])
self.plotQQ(qq_save)
manhattan_save = "_".join([save_path, "manhattan.png"])
self.plotManhattan(manhattan_save,
resolution=resolution,
sig_level=6,
write_merged=False)
def getHits(self, threshold=0.00000005):
'''
Pull out regions of association by selecting
all SNPs with association p-values less than
        a certain threshold. Default is genome-wide
        significance, p < 5x10-8.
Then select region +/- 1.5Mb of the index SNP.
'''
hits_df = self.results[self.results["P"] <= threshold]
        # find the range of SNPs within 3Mb of each index SNP
contig_group = hits_df.groupby(["CHR"])
# there may be multiple independent hits on a given
# chromosome. Need to identify independent regions.
# Independent regions are defined by their statistical
# independence, not distance. Just take all SNPs
        # within 3Mb of the lead SNP for each signal
        # this will create overlaps of association signals
for contig, region in contig_group:
region.index = region["BP"]
chr_df = self.results[self.results["CHR"] == contig]
chr_df.index = chr_df["BP"]
# find independent regions and output consecutively
# if only a single SNP above threshold then there is
# only one independent region!!
if len(region) > 1:
independents = self.findIndependentRegions(region)
indi_group = independents.groupby("Group")
else:
region["Group"] = 1
indi_group = region.groupby("Group")
for group, locus in indi_group:
# if there is only a single variant should
# the region be kept? Likely a false
# positive
if min(locus["BP"]) == max(locus["BP"]):
pass
else:
try:
try:
locus.loc[:, "STAT"] = abs(locus["STAT"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.loc[:, "T"] = abs(locus["T"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.sort_values(by="CHISQ", inplace=True)
index_bp = locus.iloc[0]["BP"]
E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"]))
left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"])
right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"])
range_df = chr_df.loc[left_end: right_end, :]
max_stat = max(abs(range_df["STAT"]))
yield contig, range_df
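# Hedged usage sketch (not part of the original class): gather the regions
# yielded by getHits() into one table. The method name and the added
# "contig" column are illustrative assumptions.
def exampleCollectHits(self, threshold=0.00000005):
    regions = []
    for contig, region_df in self.getHits(threshold=threshold):
        region_df = region_df.copy()
        region_df["contig"] = contig
        regions.append(region_df)
    return pd.concat(regions, axis=0) if regions else pd.DataFrame()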
def extractSNPs(self, snp_ids):
'''
Extract a specific set of SNP results
Arguments
---------
snp_ids: list
a list of SNP IDs to extract from the
GWAS results
Returns
-------
    snp_results: pandas.core.frame.DataFrame
'''
self.results.index = self.results["SNP"]
snp_results = self.results.loc[snp_ids]
return snp_results
def findIndependentRegions(self, dataframe):
'''
Find the number of independent regions on
    a chromosome. Uses R distance and tree
cutting functions
'''
    # move the dataframe into R
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(dataframe)
R.assign("rdf", r_df)
R('''mat <- as.matrix(rdf$BP)''')
    # get distances then cluster, chop tree at 1x10^6 bp
R('''dist.mat <- dist(mat, method="euclidean")''')
R('''clusts <- hclust(dist.mat, "average")''')
R('''cut <- cutree(clusts, h=1e6)''')
R('''out.df <- rdf''')
R('''out.df$Group <- cut''')
# need to handle changes in pandas2ri API
try:
regions_df = pd.DataFrame(py2ri.ri2py(R["out.df"]))
except NotImplementedError:
regions_df = pd.DataFrame(R["out.df"])
return regions_df
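# Hedged alternative sketch (not in the original module): the same grouping
# can be computed without R by clustering base-pair positions with scipy and
# cutting the average-linkage tree at the same 1e6 bp height used above.
def findIndependentRegionsPy(self, dataframe, cut_height=1e6):
    from scipy.cluster.hierarchy import fcluster, linkage
    positions = dataframe["BP"].astype(float).values.reshape(-1, 1)
    tree = linkage(positions, method="average")
    dataframe = dataframe.copy()
    dataframe["Group"] = fcluster(tree, t=cut_height, criterion="distance")
    return dataframe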
def mergeFrequencyResults(self, freq_dir, file_regex):
'''
Merge GWAS results with frequency information,
and format for GCTA joint analysis input
'''
# create a dummy regex to compare
# file_regex type against
test_re = re.compile("A")
if type(file_regex) == str:
file_regex = re.compile(file_regex)
elif type(file_regex) == type(test_re):
pass
else:
raise TypeError("Regex type not recognised. Must"
"be string or re.SRE_Pattern")
all_files = os.listdir(freq_dir)
freq_files = [fx for fx in all_files if re.search(file_regex, fx)]
gwas_df = self.results
df_container = []
for freq in freq_files:
freq_file = os.path.join(freq_dir, freq)
E.info("Adding information from {}".format(freq_file))
# files may or may not be tab-delimited
try:
_df = pd.read_table(freq_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
_df = pd.read_table(freq_file,
sep="\t", header=0,
index_col=None)
merge_df = pd.merge(self.results, _df,
left_on=["CHR", "SNP"],
right_on=["CHR", "SNP"],
how='left')
df_container.append(merge_df)
count = 0
for df in df_container:
if not count:
gwas_df = df
count += 1
else:
gwas_df = gwas_df.append(df)
E.info("Calculating Z scores and SEs")
        # back-calculate |z| from the two-sided P-value (Altman & Bland approximation)
        z_scores = -0.862 + np.sqrt(0.743 - 2.404 *
                                    np.log(gwas_df.loc[:, "P"]))
se = np.log(gwas_df.loc[:, "OR"])/z_scores
gwas_df.loc[:, "Z"] = z_scores
gwas_df.loc[:, "SE"] = se
gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"])
out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"]
out_df = gwas_df[out_cols]
# need to remove duplicates, especially those
# that contain NaN for A2 and MAF
out_df = out_df.loc[~np.isnan(out_df["MAF"])]
return out_df
##########################################################
# unbound methods that work on files and data structures #
##########################################################
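# Illustrative check (not part of the original code) of the Altman & Bland
# approximation used in mergeFrequencyResults above: for P = 0.05,
# z = -0.862 + sqrt(0.743 - 2.404*ln(P)) recovers a z-score close to 1.96.
def exampleZFromP(p_value=0.05):
    return -0.862 + np.sqrt(0.743 - 2.404 * np.log(p_value))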
def plotMapPhenotype(data, coords, coord_id_col, lat_col,
long_col, save_path, xvar, var_type,
xlabels=None, level=None):
'''
Generate a map of the UK, with phenotype data overlaid
'''
# merge co-ordinate data with phenotype data
merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col,
right_on=coord_id_col, how='inner')
    # recode the phenotype column and set the level of the categorical variable
if xlabels and var_type == "categorical":
# convert to string type as a categorical variable
# drop NA observations from the merged data frame
na_mask = pd.isnull(merged_df.loc[:, xvar])
merged_df = merged_df[~na_mask]
rvar = merged_df.loc[:, xvar].copy()
nvar = pd.Series(np.nan_to_num(rvar), dtype=str)
var = [v for v in set(nvar)]
var.sort()
# recode the variables according to the input labels
xlabs = xlabels.split(",")
lbls = [str(xlabs[ix]) for ix in range(len(var))]
for xv in range(len(var)):
nvar[nvar == var[xv]] = lbls[xv]
merged_df.loc[:, "cat_var"] = nvar
else:
pass
if level:
lvar = merged_df.loc[:, "cat_var"].copy()
mask = lvar.isin([level])
lvar[mask] = 1
lvar[~mask] = 0
lvar = lvar.fillna(0)
merged_df.loc[:, "dichot_var"] = lvar
else:
pass
# push the df into the R env
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(merged_df)
R.assign("pheno.df", r_df)
# setup the map and plot the points
R('''suppressPackageStartupMessages(library(maps))''')
R('''suppressPackageStartupMessages(library(mapdata))''')
R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",'''
'''"Ireland", "Isle of Man", "Wales:Anglesey"), '''
'''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''')
# colour by reference, or a colour for each discrete value
if level:
R('''red <- rep("#FF0000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''')
R('''black <- rep("#000000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''')
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points((-pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], '''
'''(-pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals())
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals())
R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),'''
'''fill=c("#000000", "#FF0000"))''' % locals())
R('''dev.off()''')
else:
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", '''
'''col=factor(pheno.df$cat_var))''' % locals())
R('''legend('topleft', legend=unique(pheno.df$cat_var),'''
'''fill=unique(pheno.df$cat_var))''' % locals())
R('''dev.off()''')
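# Hedged example call (not in the original module): the data frames and all
# column names below ("f.eid", "lat", "long", "smoker") are hypothetical and
# must be replaced with the identifiers used in the real phenotype and
# co-ordinate tables.
def examplePlotMapCall(pheno_df, coords_df):
    plotMapPhenotype(data=pheno_df, coords=coords_df, coord_id_col="f.eid",
                     lat_col="lat", long_col="long", save_path="uk_map.png",
                     xvar="smoker", var_type="categorical",
                     xlabels="never,former,current", level="current")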
def plotPhenotype(data, plot_type, x, y=None, group=None,
save_path=None, labels=None, xlabels=None,
ylabels=None, glabels=None, var_type="continuous"):
'''
Generate plots of phenotypes using ggplot
'''
# change data format if necessary and convert nan/NA to missing
if not y and var_type == "categorical":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "integer":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "continuous":
var = data.loc[:, x].copy()
data.loc[:, x] = | pd.Series(var, dtype=np.float64) | pandas.Series |
import streamlit as st
import pandas as pd
import folium
import geopandas
import seaborn as sns
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
from matplotlib import pyplot as plt
import plotly.express as px
from PIL import Image
pd.set_option('display.float_format', lambda x: '%.f' % x)
# =================================================
# ================== PAGE SET UP ==================
# =================================================
# === page titles
st.set_page_config(layout="wide")
c1, c2 = st.beta_columns((1,3))
# image
with c1:
photo = Image.open('house.png')
st.image(photo,width=300)
#headers
with c2:
st.write('')
HR_format = '<p style="font-family:sans-serif;' \
'font-size: 55px;' \
'font-weight: bold;' \
'font-style: italic;' \
'text-align: center;' \
'">House Rocket Company Analysis </p>'
st.markdown(HR_format, unsafe_allow_html=True)
st.write('')
st.write("House Rocket's business model consists of purchasing and reselling properties through a digital platform. "
"The data scientist is responsible for developing an online dashboard to help the CEO company "
"to have an overview of properties available on House Rocket's portfolio and "
"find the best business opportunities.")
st.write("For more information verify on: "
"[GitHub](https://github.com/almirgouvea/P001-Exploratory-Data_Analysis)")
st.write("Made by **<NAME>**"
" \n\n"
"Social media: [LinkedIn](https://www.linkedin.com/in/almirdonizette) "
" [Mail](<EMAIL>)")
# =================================================
# =============== HELPER FUNCTIONS ================
# =================================================
@st.cache(allow_output_mutation = True)
def get_data(path):
data = | pd.read_csv(path) | pandas.read_csv |
import os
import argparse
import pandas as pd
from datetime import datetime
import random
def process_basic_data(args, partition, episode_dirname='basic'):
df = None
df_path = os.path.join(args.output_path, partition + '_listfile.csv')
input_dir = os.path.join(args.root_path, partition)
patient_dirnames = list(filter(str.isdigit, os.listdir(input_dir)))
try:
output_dir = os.path.join(args.output_path, partition)
episode_outdir = os.path.join(output_dir, episode_dirname)
os.makedirs(episode_outdir)
except FileExistsError:
pass
for patient_index, patient in enumerate(patient_dirnames):
patient_dir = os.path.join(input_dir, patient)
ts_fnames = list(filter(lambda x: x.find("timeseries") != -1,
os.listdir(patient_dir)))
for ts_fname in ts_fnames:
lb_fname = ts_fname.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_dir, lb_fname))
ts_df = pd.read_csv(os.path.join(patient_dir, ts_fname))
# Quality check ---------------------------
if len(label_df) == 0 or len(ts_df) == 0:
print('Empty label df or ts df', patient, ts_fname)
continue
            assert len(label_df) == 1
label_df = label_df.to_dict(orient='records')[0]
los = label_df['Length of Stay'] * 24 # length of stay in hours
if pd.isnull(los):
print("length of stay is missing", patient, ts_fname)
continue
elif label_df["Mortality"] == pd.isnull(label_df["Deathtime"]):
                print('Mismatched mortality label and deathtime', patient_dir)
continue
# Copy over time series data --------------
ts_df.to_csv(os.path.join(episode_outdir, patient + "_" + ts_fname))
            # Collect time-invariant information ------
rel_death_hours = None
if not pd.isnull(label_df["Deathtime"]) and not pd.isnull(label_df["Intime"]):
fmt = '%Y-%m-%d %H:%M:%S'
tdelta = datetime.strptime(label_df["Deathtime"], fmt) -\
datetime.strptime(label_df["Intime"], fmt)
rel_death_hours = tdelta.days * 24 + tdelta.seconds / 60 / 60
basis_keys = ['Icustay', 'Intime', 'Outtime', 'Deathtime',
'Mortality', 'Ethnicity', 'Gender', 'Age', 'Height', 'Weight']
diagnosis_keys = [k for k in label_df.keys() if k.startswith('Diagnosis ')]
new_df = {k: label_df[k] for k in basis_keys + diagnosis_keys}
new_df['Subject'] = patient
new_df['Real Death Hours'] = rel_death_hours
new_df['Length of Stay'] = los
new_df['Episode'] = ts_fname
new_df = pd.DataFrame(new_df, index=[0])
df = df.append(new_df, ignore_index=True) if df is not None else new_df
if ((patient_index + 1) % 100 == 0):
print("\rprocessed {} / {} patients".format(patient_index + 1,
len(patient_dirnames)))
print('Number of encounters: {}'.format(len(df)))
if partition == "train":
df = df.sample(frac=1)
if partition == "test":
df = df.sort_values('Icustay', ascending=False)
if df is not None:
df.to_csv(df_path)
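# Minimal CLI sketch (an assumption; the argument parser is not shown in this
# file). Only root_path and output_path are referenced above, so any further
# flags would be hypothetical.
def build_arg_parser():
    parser = argparse.ArgumentParser(
        description="Collect per-episode time series and stay-level labels.")
    parser.add_argument("root_path",
                        help="directory containing train/ and test/ patient folders")
    parser.add_argument("output_path",
                        help="directory to write listfiles and episode CSVs to")
    return parser
# e.g. args = build_arg_parser().parse_args(); process_basic_data(args, "train")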
def process_extra_data(args, partition, episode_dirname):
df_path = os.path.join(args.output_path, partition + '_listfile.csv')
df = | pd.read_csv(df_path) | pandas.read_csv |
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
import logging as __logging
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __araclar as __araclar, __dogrulama as __dogrulama
from seffaflik.elektrik.uretim import organizasyonlar as __organizasyonlar
__first_part_url = "market/"
def hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic=""):
"""
    Returns bilateral contract supply/demand volume data for the given date range.
    Note: if an "organizasyon_eic" value is given, hourly supply/demand volumes for that organization are returned.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    Returns
    ----------------
    Supply/demand bilateral contract quantities (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_eic_dogrulama(baslangic_tarihi, bitis_tarihi, organizasyon_eic):
try:
particular_url = \
__first_part_url + "bilateral-contract-sell" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + organizasyon_eic
json = __make_requests(particular_url)
df_arz = __pd.DataFrame(json["body"]["bilateralContractSellList"])
particular_url = \
__first_part_url + "bilateral-contract-buy" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + organizasyon_eic
json = __make_requests(particular_url)
df_talep = __pd.DataFrame(json["body"]["bilateralContractBuyList"])
df = __araclar.__merge_ia_dfs_evenif_empty(df_arz, df_talep)
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df = df[["Tarih", "Saat", "Talep Miktarı", "Arz Miktarı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
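# Illustrative usage sketch (not part of the module); the dates below are
# arbitrary examples.
def _example_hacim_usage():
    return hacim(baslangic_tarihi="2021-01-01", bitis_tarihi="2021-01-07")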
def tum_organizasyonlar_hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), hacim_tipi="NET"):
"""
    Returns hourly bilateral contract volume data of all organizations for the given date range and volume type.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    hacim_tipi       : volume type as a string ("NET", "ARZ" (supply), or "TALEP" (demand)) (default: "NET")
    Returns
    -----------------
    Bilateral contract volume data of all organizations (Tarih, Saat, Hacim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
list_org = __organizasyonlar()[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
if hacim_tipi.lower() == "net":
list_df_unit = p.starmap(__organizasyonel_net_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "arz":
list_df_unit = p.starmap(__organizasyonel_arz_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "talep":
list_df_unit = p.starmap(__organizasyonel_talep_hacim, list_date_org_eic, chunksize=1)
else:
__logging.error("Lütfen geçerli bir hacim tipi giriniz: Net, Arz, Talep", exc_info=False)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_gorevli_tedarik_hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), hacim_tipi="NET"):
"""
    Returns hourly bilateral contract volume data of the assigned supply companies (görevli tedarik) for the given date range and volume type.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    hacim_tipi       : volume type as a string ("NET", "ARZ" (supply), or "TALEP" (demand)) (default: "NET")
    Returns
    -----------------
    Bilateral contract volume data of the assigned supply companies (Tarih, Saat, Hacim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = __organizasyonlar()
org = org[(org["Adı"].str.contains("K1")) | (org["Adı"].str.contains("K2")) | (
org["Adı"].str.contains("K3"))].reset_index(drop=True)
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
if hacim_tipi.lower() == "net":
list_df_unit = p.starmap(__organizasyonel_net_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "arz":
list_df_unit = p.starmap(__organizasyonel_arz_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "talep":
list_df_unit = p.starmap(__organizasyonel_talep_hacim, list_date_org_eic, chunksize=1)
else:
__logging.error("Lütfen geçerli bir hacim tipi giriniz: Net, Arz, Talep", exc_info=False)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def __organizasyonel_net_hacim(baslangic_tarihi, bitis_tarihi, org):
"""
    Returns hourly bilateral contract net volume data for the given date range and organization.
    Important Note
    ------------
    If no organization is given, the total market volume is returned.
    Parameters
    -----------
    baslangic_tarihi: start date in %YYYY-%MM-%DD format
    bitis_tarihi    : end date in %YYYY-%MM-%DD format
    org             : dict with the organization's EIC code ("EIC Kodu") and short name ("Kısa Adı")
    Returns
    -----------------
    Net bilateral contract quantity (MWh)
"""
try:
particular_url = \
__first_part_url + "bilateral-contract-sell" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + org["EIC Kodu"]
json = __make_requests(particular_url)
df_arz = __pd.DataFrame(json["body"]["bilateralContractSellList"])
particular_url = \
__first_part_url + "bilateral-contract-buy" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + org["EIC Kodu"]
json = __make_requests(particular_url)
df_talep = __pd.DataFrame(json["body"]["bilateralContractBuyList"])
df = __araclar.__merge_ia_dfs_evenif_empty(df_arz, df_talep)
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df[org["Kısa Adı"]] = df["Talep Miktarı"] - df["Arz Miktarı"]
df = df[["Tarih", "Saat", org["Kısa Adı"]]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __organizasyonel_arz_hacim(baslangic_tarihi, bitis_tarihi, org):
"""
    Returns hourly bilateral contract supply volume data for the given date range and organization.
    Important Note
    -----------
    If no organization is given, the total market volume is returned.
    Parameters
    ----------
    baslangic_tarihi: start date in %YYYY-%MM-%DD format
    bitis_tarihi    : end date in %YYYY-%MM-%DD format
    org             : dict with the organization's EIC code ("EIC Kodu") and short name ("Kısa Adı")
    Returns
    -----------------
    Supply bilateral contract quantity (MWh)
"""
try:
particular_url = __first_part_url + "bilateral-contract-sell" + "?startDate=" + baslangic_tarihi + "&endDate=" \
+ bitis_tarihi + "&eic=" + org["EIC Kodu"]
json = __make_requests(particular_url)
df = | __pd.DataFrame(json["body"]["bilateralContractSellList"]) | pandas.DataFrame |
from pylab import *
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
import requests
import pandas_datareader.data as web
from Create_PDF_Report import portfolio_report
ALPHA_VANTAGE_KEY = 'ENTER_KEY'
RESULT_DETAILED = True
USER_AGENT = {
'User-Agent': (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36')
}
sesh = requests.Session()
sesh.headers.update(USER_AGENT)
def check_if_weekend(date):
def test(date):
try:
temp_date = date.strftime('%Y-%m-%d')
temp = data_set.loc[temp_date]
error = False
except:
date = date - datetime.timedelta(days=1)
error = True
return date, error
if date.weekday() == 6:
date = date - datetime.timedelta(days=2)
elif date.weekday() == 5:
date = date - datetime.timedelta(days=1)
try:
temp = data_set.loc[date.strftime('%Y-%m-%d')]
error = False
except:
error = True
while error == True:
date, error = test(date)
return date
def calculate_return(start_date, end_date):
global data_set
data_set = portfolio_main.historical_performance_stock()
portfolio_return = 0
try:
if data_set.all() != 0:
end_date_value = data_set.loc[end_date]
start_date_value = data_set.loc[start_date]
portfolio_return += ((float(end_date_value) / float(start_date_value)) - 1) * total_hist_p_allocation
except AttributeError:
i = 0
i = 0
while i < len(ava_fund_list):
url = "https://www.avanza.se/_api/fund-guide/chart/" + ava_fund_list_id[i] + "/" + start_date + "/" + end_date
response = requests.get(url)
dictr = response.json()
recs = dictr['dataSerie']
ava_fund_temp_data = pd.json_normalize(recs)
performance_ava_fund = float(ava_fund_temp_data.iloc[-1, 1]) / 100 * ava_fund_list_allocation[i]
portfolio_return += performance_ava_fund
i += 1
return portfolio_return
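# Illustrative sketch only (not in the original script): assumes the
# portfolio globals used by calculate_return() have been initialised; the
# dates are placeholders.
def example_period_return(start="2021-01-04", end="2021-06-30"):
    return round(calculate_return(start, end) * 100, 2)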
class Portfolio:
def historical_performance_all(self):
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
date_list = []
for item in self.position_performance:
date_list.append(item.index[0])
i_date = 0
while date_list[i_date] != max(date_list):
i_date += 1
i = i_date
if self.positions[i] not in ava_fund.index:
temp_data = self.position_performance[i].groupby(self.position_performance[i].index.to_period('M')).first()
temp_data = temp_data.dropna()
temp_data.rename({'Adj Close': 'y'}, axis=1, inplace=True)
temp_data.index.name = 'x'
temp_data = (temp_data.div(temp_data['y'][0]) - 1) * 100
else:
temp_data = self.position_performance[i]
temp_data = temp_data.groupby(temp_data.index.to_period('M')).first()
portfolio_historical_performance = temp_data['y'] * self.position_allocation[i]
i = 0
i += 1
while i < len(self.positions):
if i != i_date:
if self.positions[i] not in ava_fund.index:
temp_data = self.position_performance[i].groupby(
self.position_performance[i].index.to_period('M')).first()
temp_data = temp_data.dropna()
temp_data.rename({'Adj Close': 'y'}, axis=1, inplace=True)
temp_data.index.name = 'x'
temp_data = (temp_data.div(temp_data['y'][0]) - 1) * 100
else:
temp_data = self.position_performance[i]
temp_data = temp_data.groupby(temp_data.index.to_period('M')).first()
if portfolio_historical_performance.index[0] in temp_data.index:
data_point_first = int(temp_data.index.get_loc(portfolio_historical_performance.index[0]))
temp_data = temp_data.iloc[data_point_first:].div(temp_data.iloc[data_point_first, 0])
portfolio_historical_performance += temp_data['y'] * self.position_allocation[i]
else:
portfolio_historical_performance += temp_data['y'] * self.position_allocation[i]
i += 1
# Plotting Stuff
slower = np.ma.masked_where(portfolio_historical_performance > 0, portfolio_historical_performance)
negative_return = portfolio_historical_performance.copy()
negative_return[slower > 0] = np.nan
fig, ax = plt.subplots(figsize=(10, 5))
portfolio_historical_performance.plot(ax=ax, color="#348dc1") # Benchmark Colour ? "#fedd78"
negative_return.plot(ax=ax, color="darkred")
ax.set_ylabel('', fontweight='bold', fontsize=12, color="black")
ax.set_xlabel('')
ax.yaxis.set_label_coords(-.1, .5)
ax.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.suptitle("Performance", y=.99, fontweight="bold",
fontsize=14, color="black")
ax.axhline(0, ls="-", lw=1,
color='gray', zorder=1)
ax.axhline(0, ls="--", lw=1,
color='black', zorder=2)
fig.set_facecolor('white')
ax.set_title("%s - %s" % (
portfolio_historical_performance.index[:1][0].strftime('%e %b \'%y'),
portfolio_historical_performance.index[-1:][0].strftime('%e %b \'%y')
), fontsize=12, color='gray')
ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}%".format(int(x))))
ax.set_facecolor('white')
ax.fill_between(portfolio_historical_performance.index, 0, portfolio_historical_performance,
where=portfolio_historical_performance >= 0, interpolate=True,
color="#348dc1", alpha=.25)
ax.fill_between(portfolio_historical_performance.index, 0, portfolio_historical_performance,
where=portfolio_historical_performance <= 0, interpolate=True,
color="red", alpha=.25)
fig.autofmt_xdate()
try:
fig.tight_layout()
# plt.subplots_adjust(hspace=0, bottom=0, top=1)
except Exception:
pass
fig.savefig("Portfolio_Return.png")
def historical_performance_stock(self):
global total_hist_p_allocation
total_hist_p_allocation = 0
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
if self.positions[0] not in ava_fund.index:
performance = self.position_performance[0]['Adj Close'].div(
self.position_performance[0]['Adj Close'][0]).dropna().mul(self.position_allocation[0])
total_hist_p_allocation += self.position_allocation[0]
else:
performance = 0
loc_perf_index = 1
while len(self.positions) > loc_perf_index:
if self.positions[loc_perf_index] not in ava_fund.index:
if len(self.positions) > loc_perf_index:
performance += self.position_performance[loc_perf_index]['Adj Close'].div(
self.position_performance[loc_perf_index]['Adj Close'][0]).dropna().mul(
self.position_allocation[loc_perf_index])
total_hist_p_allocation += self.position_allocation[loc_perf_index]
loc_perf_index += 1
'''
fig, ax = plt.subplots()
ax.plot(performance, '-')
ax.grid(True)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
fig.suptitle('Performance')
fig.autofmt_xdate()
plt.show()
'''
if total_hist_p_allocation == 0:
performance = 0
return performance
def stress_test(self):
draw_downs = 'Drawback Periods:\n\nGlobal Financial Crisis (10.09.2007-03.09.2009)\nPerformance: ' + str(round(
calculate_return("2007-10-09", "2009-03-09") * 100,
2)) + '% SP500: -54.89%\nU.S. Downgrade (04.30.2011-10.03.2011)\nPerformance: ' + str(round(
calculate_return("2011-04-29", "2011-10-03") * 100,
2)) + '% SP500: -18.64%\nGlobal Slowdown Fears (05.22.2015-08.25.2015)\nPerformance: ' + str(round(
calculate_return("2015-05-22", "2015-08-25") * 100,
2)) + '% SP500: -11.89%\nOil, U.S. Recession Fears (11.04.2015-02.11.2016)\nPerformance: ' + str(round(
calculate_return("2015-11-04", "2016-02-11") * 100,
2)) + '% SP500: -12.71%\nRising Rates/Trade (09.21.2018-12.24.2018) \nPerformance: ' + str(round(
calculate_return("2018-09-21", "2018-12-24") * 100,
2)) + '% SP500: -19.36%\nCovid-19 Concerns Begin (02.19.2020-03.23.2020) \nPerformance: ' + str(round(
calculate_return("2020-02-19", "2020-03-23") * 100, 2)) + '% SP500: -33.47%'
rebounds = 'Rebound Periods:\n\nRecession Ends (03.10.2009-04.23.2010)\nPerformance: ' + str(round(
calculate_return("2009-03-10", "2010-04-23") * 100,
2)) + '% SP500: 84.21%\nFlash Crash Rebound/European Relief (07.02.2010-02.18.2011)\nPerformance: ' + str(
round(
calculate_return("2010-07-02", "2011-02-18") * 100,
2)) + '% SP500: 33.02%\nCentral Bank QE (12.30.2011-12.29.2014)\nPerformance: ' + str(round(
calculate_return("2011-12-30", "2014-12-29") * 100,
2)) + '% SP500: 55.40%\nChina Easing/Oil rebound/Weaker USD (02.12.2016-01.26.2018)\nPerformance: ' + str(
round(
calculate_return("2016-02-12", "2018-01-26") * 100,
2)) + '% SP500: 63.49%\nFed Eases (12.26.2018-12.27.2019)\nPerformance: ' + str(round(
calculate_return("2018-12-26", "2019-12-27") * 100,
2)) + '% SP500: 40.63%\nFiscal/Fed Covid-19 Response (03.24.2020-06.08.2020)\nPerformance: ' + str(round(
calculate_return("2020-03-24", "2020-06-08") * 100, 2)) + '% SP500: 40.63%'
falling_ir = 'Falling Interest Rates:\n\nU.S. Downgrade (02.09.2011-09.22.2011)\nPerformance: ' + str(round(
calculate_return("2011-02-09", "2011-09-22") * 100,
2)) + '% (-2.03)\nEurope Debt Crisis/Flight to Quality (03.20.2012-07.25.2012)\nPerformance: ' + str(round(
calculate_return("2012-03-20", "2012-07-25") * 100,
2)) + '% (-0.93)\nWeaker Growth/Low Inflation (01.09.2014-02.02.2015)\nPerformance: ' + str(round(
calculate_return("2014-01-09", "2015-02-02") * 100,
2)) + '% (-1.33)\nGlobal Slowdown Fear (06.11.2015-07.05.2016)\nPerformance: ' + str(round(
calculate_return("2015-06-11", "2016-07-05") * 100,
2)) + '% (-1.13)\nEscalated U.S.-China Trade War (11.09.2018-09.04.2019)\nPerformance: ' + str(round(
calculate_return("2018-11-09", "2019-09-04") * 100,
2)) + '% (-1.77)\nCovid-19 Concerns Begin (01.21.2020-03.09.2020)\nPerformance: ' + str(round(
calculate_return("2020-01-21", "2020-03-09") * 100, 2)) + '% (-1.30)'
rising_ir = 'Rising Interest Rates (Change in RFR)\n\n10.06.2010-02.08.2011\nPerformance: ' + str(round(
calculate_return("2010-10-06", "2011-08-02") * 100,
2)) + '% (+1.34)\n05.02.2013-09.05.2013\nPerformance: ' + str(round(
calculate_return("2013-05-02", "2013-09-05") * 100,
2)) + '% (+1.32)\n07.08.2015-12.15.2015\nPerformance: ' + str(round(
calculate_return("2015-07-08", "2015-12-15") * 100,
2)) + '% (+1.23)\n09.07.2017-05.17.2018\nPerformance: ' + str(round(
calculate_return("2017-09-07", "2018-05-17") * 100,
2)) + '% (+1.06)\nCovid-19 Recovery/Inflation Concerns (03.09.2020-03.19.2021)\nPerformance: ' + str(round(
calculate_return("2020-03-09", "2021-03-19") * 100, 2)) + '% (+1.20)'
return draw_downs, rebounds, falling_ir, rising_ir
def pdf_data_generate(self):
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
today = datetime.datetime.now()
today_date = check_if_weekend(today)
today_date = today_date.strftime('%Y-%m-%d')
date_one_y_ago = today - datetime.timedelta(days=365)
date_one_y_ago = check_if_weekend(date_one_y_ago)
date_one_y_ago = date_one_y_ago.strftime('%Y-%m-%d')
date_one_m_ago = today - datetime.timedelta(days=30)
date_one_m_ago = check_if_weekend(date_one_m_ago)
date_one_m_ago = date_one_m_ago.strftime('%Y-%m-%d')
date_three_m_ago = today - datetime.timedelta(days=90)
date_three_m_ago = check_if_weekend(date_three_m_ago)
date_three_m_ago = date_three_m_ago.strftime('%Y-%m-%d')
date_three_y_ago = today - datetime.timedelta(days=1095)
date_three_y_ago = check_if_weekend(date_three_y_ago)
date_three_y_ago = date_three_y_ago.strftime('%Y-%m-%d')
date_begin_of_year = today.date().replace(month=1, day=1)
date_begin_of_year = check_if_weekend(date_begin_of_year)
date_begin_of_year = date_begin_of_year.strftime('%Y-%m-%d')
performance_1m = str(round(calculate_return(date_one_m_ago, today_date) * 100, 2))
performance_3m = str(round(calculate_return(date_three_m_ago, today_date) * 100, 2))
performance_ytd = str(round(calculate_return(date_begin_of_year, today_date) * 100, 2))
performance_1y = str(round(calculate_return(date_one_y_ago, today_date) * 100, 2))
performance_3y = str(round(calculate_return(date_three_y_ago, today_date) * 100, 2))
i_ava = 0
i = 0
holding_overview_list = []
for position in positions_list:
position_current_list = []
position_current_list.append(position)
data_frame_temp = self.position_performance[i]
if position not in ava_fund.index:
performance_current_1y = str(round((float(data_frame_temp['Adj Close'][-1]) / float(
data_frame_temp['Adj Close'].loc[date_one_y_ago]) - 1) * 100, 2)) + "%"
performance_current_3y = str(round((float(data_frame_temp['Adj Close'][-1]) / float(
data_frame_temp['Adj Close'].loc[date_three_y_ago]) - 1) * 100, 2)) + "%"
else:
json_dict = ava_fund_list_info[i_ava]
try:
performance_current_1y = str(round(json_dict['developmentOneYear'], 2)) + "%"
performance_current_3y = str(round(json_dict['developmentThreeYears'], 2)) + "%"
except:
performance_current_1y = 'Error'
performance_current_3y = 'Error'
i_ava += 1
position_current_list.append(performance_current_1y)
position_current_list.append(performance_current_3y)
position_current_list.append(str(self.position_allocation[i] * 100) + "%")
holding_overview_list.append(position_current_list)
i += 1
return performance_1m, performance_3m, performance_ytd, performance_1y, performance_3y, holding_overview_list
    def __init__(self, list_of_positions, allocation_of_positions):  # initializer
self.positions = list_of_positions
self.position_allocation = allocation_of_positions
global ava_fund_list, ava_fund_list_info, ava_fund_list_allocation, ava_fund_list_id, in_ava_fund, stock_details, stock_overview, stock_ratings, stock_forecast
position_data_frame_list = []
ava_fund_list = []
ava_fund_list_info = []
ava_fund_list_allocation = []
ava_fund_list_id = []
in_ava_fund = []
stock_details = []
stock_temp = []
stock_overview = []
stock_ratings = []
stock_forecast = []
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
while len(self.positions) > len(position_data_frame_list):
if self.positions[len(position_data_frame_list)] in ava_fund.index:
fund_id = ava_fund.loc[self.positions[len(position_data_frame_list)], 'ID']
url = 'https://www.avanza.se/_api/fund-guide/guide/' + fund_id
response = requests.get(url)
ava_fund_list.append(self.positions[len(position_data_frame_list)])
ava_fund_list_id.append(fund_id)
ava_fund_list_info.append(response.json())
ava_fund_list_allocation.append(self.position_allocation[len(position_data_frame_list)])
url = 'https://www.avanza.se/_api/fund-guide/chart/' + fund_id + '/infinity'
response = requests.get(url)
dictr = response.json()
recs = dictr['dataSerie']
temp_data = pd.json_normalize(recs)
i = 0
for item in temp_data['x']:
test = datetime.datetime.fromtimestamp(int(float(item) / 1000))
temp_data.iloc[i, 0] = f"{test:%Y-%m-%d}"
i += 1
temp_data['x'] = pd.to_datetime(temp_data['x'])
temp_data = temp_data.set_index('x')
# temp_data = temp_data.groupby(temp_data.index.to_period('M')).first()
temp_data = temp_data.dropna()
position_data_frame_list.append(temp_data)
in_ava_fund.append(True)
stock_details.append(0)
stock_overview.append(0)
stock_ratings.append(0)
stock_forecast.append(0)
else:
if RESULT_DETAILED:
'''Get Alpha Vantage Data'''
url = 'https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol={}&apikey={}'.format(
self.positions[len(position_data_frame_list)],
ALPHA_VANTAGE_KEY)
response = requests.get(url)
dictr = response.json()
stock_temp.append(pd.json_normalize(dictr['quarterlyReports']))
stock_temp.append(pd.json_normalize(dictr['annualReports']))
url = 'https://www.alphavantage.co/query?function=CASH_FLOW&symbol={}&apikey={}'.format(
self.positions[len(position_data_frame_list)],
ALPHA_VANTAGE_KEY)
response = requests.get(url)
dictr = response.json()
stock_temp.append(pd.json_normalize(dictr['quarterlyReports']))
stock_temp.append( | pd.json_normalize(dictr['annualReports']) | pandas.json_normalize |
# -*- coding: utf-8 -*-
"""
This script makes plots of relevant data.
@author: <NAME>
"""
import yaml
import os
import pandas as pd
import energyscope as es
import numpy as np
import matplotlib.pyplot as plt
from sys import platform
from energyscope.utils import make_dir, load_config, get_FEC_from_sankey
from energyscope.postprocessing import get_total_einv
def compute_einv_res(cs: str, all_data: dict):
"""
Compute the Einv by RESOURCES part (Einv_op).
:param cs: case study path
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
# Define the RESOURCES list
RESOURCES = list(all_data['Resources'].index)
return df_einv.loc[RESOURCES].copy()['Einv_op']
def compute_einv_tech(cs: str, all_data: dict):
"""
Compute the Einv by TECHNOLOGIES part (Einv_const).
:param cs: case study path
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
# Define the TECHNOLOGIES list
TECHNOLOGIES = list(all_data['Technologies'].index)
return df_einv.loc[TECHNOLOGIES].copy()['Einv_constr']
def retrieve_einv_const_by_categories(range_val, all_data: dict, dir: str, user_data: str):
"""
Retrieve the Einv_const values for all case studies classed by categories of technologies.
:param range_val: range of GWP constrained values.
:param all_data: the data into a dict of pd.DataFrames.
:param dir: case study path and name.
:param user_data: user_data directory.
    :return: dict with keys being the categories of technologies. For each category, a pd.DataFrame with Einv_const values for all scenarios.
"""
# Retrieve all Einv_const values for all case studies
einv_tech = []
for run in ['run_' + str(i) for i in range_val]:
cs_temp = dir + '/' + run
einv_tech.append(compute_einv_tech(cs=cs_temp, all_data=all_data))
df_einv_tech = pd.concat(einv_tech, axis=1)
df_einv_tech.columns = [i for i in range_val]
# Retrieve the technologies categories:
df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
# tech_cat = ['Electricity', 'Heat', 'Mobility', 'Infrastructure', 'Synthetic fuels', 'Storage']
tech_cat = list(df_aux_tech['Category'].values)
tech_cat = list(dict.fromkeys(tech_cat)) # remove duplicate
# Class the technologies by categories into a dict
tech_by_cat = dict()
for cat in tech_cat:
tech_by_cat[cat] = list(df_aux_tech['Category'][df_aux_tech['Category'] == cat].index)
# Retrieve the values of Einv_const per category of technology (and remove tech where Einv_const is always 0)
tech_classed_by_cat = dict()
for cat in tech_by_cat.keys():
tech_classed_by_cat[cat] = retrieve_non_zero_val(df=df_einv_tech.loc[tech_by_cat[cat]].transpose()) /1000 # TWh
return tech_classed_by_cat
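# Hedged usage sketch (not part of the original script): `cs_dir` is a
# hypothetical path to a finished case-study run.
def example_einv_breakdown(cs_dir: str, all_data: dict):
    einv_res = compute_einv_res(cs=cs_dir, all_data=all_data)
    einv_tech = compute_einv_tech(cs=cs_dir, all_data=all_data)
    return pd.concat([einv_res, einv_tech], axis=0)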
def compute_einv_details(cs: str, user_data: str, all_data: dict):
"""
Compute the Einv by RESOURCES and TECHNOLOGIES, it details the breakdown by subcategories of RESOURCES and categories of TECHNOLOGIES.
:param cs: case study path
:param user_data: user_data directory
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = | pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0) | pandas.read_csv |
import time
from datetime import datetime
import pandas as pd
from tqdm import tqdm
import yticker
def main():
ylb = yticker.YahooLookupBrowser()
answers = set()
perpage = 10000
idx = 0
letters = list('<KEY>')
queue = list(letters)
pbar = tqdm(total=len(queue))
while idx < len(queue):
pbar.set_description(f"[query = {queue[idx]}]")
pbar.refresh()
try:
t = datetime.now()
ans, total = ylb.lookup(key=queue[idx], category='all', start=0, size=perpage)
seconds = (datetime.now() - t).total_seconds()
pbar.write(f"query = {queue[idx]} | count = {len(ans)} | total = {total} | seconds = {seconds}")
except Exception as e:
pbar.write(f"error (idx = {idx}, query = {queue[idx]}): " + str(e))
pbar.write("wait for 10 seconds...")
time.sleep(10) # s
continue
if total > perpage:
add = [queue[idx] + '%20' + letter for letter in letters] + [queue[idx] + letter for letter in letters]
queue += add
pbar.write(f"Add new queries {queue[idx]}[%20][a-z] to queue")
pbar.reset(total=len(queue))
pbar.update(n=idx)
answers.update(ans)
pbar.update()
idx += 1
answer_list = sorted(list(answers))
df = | pd.DataFrame(answer_list) | pandas.DataFrame |
'''
example of loading FinMind api
'''
from Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset': 'TaiwanStockInfo'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Taiwan Stock Dividend Result----------------'''
form_data = {'dataset': 'StockDividendResult'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------StockDividendResult----------------'''
form_data = {'dataset': 'StockDividendResult',
'stock_id': '2330',
'date': '2010-10-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockNews----------------'''
form_data = {'dataset': 'TaiwanStockNews',
'date': '2019-10-10',
'stock_id': '2317'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset': 'TaiwanStockPrice',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset': 'TaiwanStockPriceMinute',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset': 'FinancialStatements',
'stock_id': '2317',
'date': '2019-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset': 'TaiwanCashFlowsStatement',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset': 'TaiwanStockStockDividend',
'stock_id': '2317',
'date': '2018-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------StockDividend----------------'''
form_data = {'dataset': 'StockDividend',
'stock_id': '0050',
'date': '2015-01-02',
}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
# data['date'] = data['date'] + '-' + data['period']
# data = data.drop('period',axis = 1)
data = Load.transpose(data)
'''----------------TaiwanStockMarginPurchaseShortSale----------------'''
form_data = {'dataset': 'TaiwanStockMarginPurchaseShortSale',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TotalMarginPurchaseShortSale----------------'''
form_data = {'dataset': 'TotalMarginPurchaseShortSale',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------InstitutionalInvestorsBuySell----------------'''
form_data = {'dataset': 'InstitutionalInvestorsBuySell',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Shareholding----------------'''
form_data = {'dataset': 'Shareholding',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------BalanceSheet----------------'''
form_data = {'dataset': 'BalanceSheet',
'stock_id': '2317',
'date': '2019-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockHoldingSharesPer----------------'''
form_data = {'dataset': 'TaiwanStockHoldingSharesPer',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMonthRevenue----------------'''
form_data = {'dataset': 'TaiwanStockMonthRevenue',
'stock_id': '2317',
'date': '2019-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanOption----------------'''
form_data = {'dataset': 'TaiwanOption'}
res = requests.post(
translate_url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
form_data = {'dataset': 'TaiwanOption',
'stock_id': 'OCO',
'date': '2019-09-05',
}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanFutures----------------'''
# load stock_id table; the code table is used for the stock_id parameter below
form_data = {'dataset': 'TaiwanFutures'}
res = requests.post(
translate_url, verify=True,
data=form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepcopy(self):
from copy import deepcopy
copy = deepcopy(self.strIndex)
self.assert_(copy is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_asOfDate(self):
d = self.dateIndex[0]
self.assert_(self.dateIndex.asOfDate(d) is d)
self.assert_(self.dateIndex.asOfDate(d - timedelta(1)) is None)
d = self.dateIndex[-1]
self.assert_(self.dateIndex.asOfDate(d + timedelta(1)) is d)
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_(np.array_equal(result, expected))
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assert_(isinstance(index_result, np.ndarray))
self.assert_(not isinstance(index_result, Index))
self.assert_(np.array_equal(arr_result, index_result))
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
tm.assert_dict_equal(tseries.map_indices(subIndex),
subIndex.indexMap)
subIndex = self.strIndex[list(boolIdx)]
tm.assert_dict_equal(tseries.map_indices(subIndex),
subIndex.indexMap)
def test_fancy(self):
sl = self.strIndex[[1,2,3]]
for i in sl:
self.assertEqual(i, sl[sl.indexMap[i]])
def test_getitem(self):
arr = np.array(self.dateIndex)
self.assertEquals(self.dateIndex[5], arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assert_(shifted is self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_(np.array_equal(shifted, self.dateIndex + timedelta(5)))
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assert_(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assert_(inter is first)
# non-iterable input
self.assertRaises(Exception, first.intersection, 0.5)
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assert_(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assert_(union is first)
union = first.union([])
self.assert_(union is first)
# non-iterable input
self.assertRaises(Exception, first.union, 0.5)
def test_add(self):
firstCat = self.strIndex + self.dateIndex
secondCat = self.strIndex + self.strIndex
self.assert_(tm.equalContents(np.append(self.strIndex,
self.dateIndex), firstCat))
self.assert_(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat.indexMap)
tm.assert_contains_all(self.strIndex, secondCat.indexMap)
tm.assert_contains_all(self.dateIndex, firstCat.indexMap)
# this is valid too
shifted = self.dateIndex + timedelta(1)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assert_('a' not in index2.indexMap)
self.assert_('afoo' in index2.indexMap)
def test_diff(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
result = first - second
self.assert_(tm.equalContents(result, answer))
diff = first.diff(first)
self.assert_(len(diff) == 0)
# non-iterable input
self.assertRaises(Exception, first.diff, 0.5)
def test_pickle(self):
def testit(index):
pickled = pickle.dumps(index)
unpickled = pickle.loads(pickled)
self.assert_(isinstance(unpickled, Index))
self.assert_(np.array_equal(unpickled, index))
tm.assert_dict_equal(unpickled.indexMap, index.indexMap)
testit(self.strIndex)
testit(self.dateIndex)
# def test_always_get_null_index(self):
# empty = Index([])
# self.assert_(empty is NULL_INDEX)
# self.assert_(self.dateIndex[15:15] is NULL_INDEX)
def test_is_all_dates(self):
self.assert_(self.dateIndex.is_all_dates())
self.assert_(not self.strIndex.is_all_dates())
self.assert_(not self.intIndex.is_all_dates())
def test_summary(self):
self._check_method_works(Index.summary)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = str(index[0])
self.assertEquals(formatted, expected)
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assert_(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1, r2 = idx1.get_indexer(idx2)
| assert_almost_equal(r1, [1, 3, -1]) | pandas.util.testing.assert_almost_equal |
import pandas as pd
import numpy as np
import pysam
import re
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio import Seq
import subprocess
try:
from . import global_para
except ImportError:
import global_para
try:
from .unaligned_reads import *
except ImportError:
from unaligned_reads import *
np.seterr(divide = 'ignore')
pd.options.mode.chained_assignment = None  # default='warn'; silence the pandas chained-assignment warning
def bam_ispair():
# define bam is paired or not;
bam_genome = pysam.AlignmentFile(global_para.genome_bam_file,'rb')
for read in bam_genome:
return(read.is_paired)
break
def exon_start_adj_get_unalign(df_exon):
unalign_readlist_true = list();
bam_genome = pysam.AlignmentFile(global_para.genome_bam_file,'rb')
for read in bam_genome.fetch(df_exon.seqname,int(df_exon.pos_end)-1,int(df_exon.pos_end)+1):
if read.mapping_quality >=global_para.min_quality and read.is_secondary == False and read.is_unmapped == False:
tmp_cigar = read.to_dict()['cigar']
tmp_nm = read.get_tag('NM')
            if re.fullmatch("[0-9]+M", tmp_cigar) and tmp_nm == 0:
pass;
if re.search("[0-9]*S$",tmp_cigar):
if read.aend == df_exon.pos_end:
unalign_readlist_true.append(read);
continue
return(unalign_readlist_true)
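# Hedged helper sketch (not part of the original module): the soft-clip test
# applied above, factored out for clarity; `pos_end` mirrors df_exon.pos_end
# in the caller.
def _is_softclipped_at_exon_end(read, pos_end):
    cigar = read.to_dict()['cigar']
    return bool(re.search("[0-9]+S$", cigar)) and read.aend == pos_end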
def exon_end_adj_get_unalign(df_exon):
unalign_readlist_true = list();
bam_genome = pysam.AlignmentFile(global_para.genome_bam_file,'rb')
for read in bam_genome.fetch(df_exon.seqname,int(df_exon.pos_start)-1,int(df_exon.pos_start)+1):
if read.mapping_quality >=global_para.min_quality and read.is_secondary == False and read.is_unmapped == False:
tmp_cigar = read.to_dict()['cigar']
tmp_nm = read.get_tag('NM')
if re.search('^[0-9]*M$', tmp_cigar) and tmp_nm == 0:  # perfectly matched read with no mismatches: nothing to collect
pass;
if re.search("^[0-9]*S",tmp_cigar):
if read.pos == (df_exon.pos_start -1 ):
unalign_readlist_true.append(read);
continue
return(unalign_readlist_true)
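# for properly paired reads, fetch the mate alignment from the BAM; mates that cannot be retrieved are silently skipped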
def exon_adj_get_mate(read_list):
bamFile_handle = pysam.Samfile(global_para.genome_bam_file,'rb')
unalign_readlist_true_mate = list()
read_list_new = [read for read in read_list if read.is_proper_pair]
for read in read_list_new:
try:
read_mate = bamFile_handle.mate(read)
unalign_readlist_true_mate.append(read_mate)
except:
pass
return(unalign_readlist_true_mate)
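# copy the adjusted start-side fields (position, counts, consensus sequence and statistics) from df_exon_adj onto df_exon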
def exon_adj_start_exchange(df_exon, df_exon_adj):
df_exon.pos_start = df_exon_adj.pos_start
df_exon.count_unalign_start = df_exon_adj.count_unalign_start
df_exon.count_exon_match_start = df_exon_adj.count_exon_match_start
df_exon.is_exon_match_start = df_exon_adj.is_exon_match_start
df_exon.consensus_seq_start = df_exon_adj.consensus_seq_start
df_exon.num_bad_reads_start = df_exon_adj.num_bad_reads_start
df_exon.num_bad_reads_ajust_start = df_exon_adj.num_bad_reads_ajust_start
df_exon.num_all_reads_start = df_exon_adj.num_all_reads_start
df_exon.num_all_reads_ajust_start = df_exon_adj.num_all_reads_ajust_start
df_exon.region_start = df_exon_adj.region_start
df_exon.bg_unalign_start = df_exon_adj.bg_unalign_start
df_exon.bg_total_start = df_exon_adj.bg_total_start
df_exon.bbinom_p_start = df_exon_adj.bbinom_p_start
df_exon.q_value_start = df_exon_adj.q_value_start
return(df_exon)
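# copy the adjusted end-side fields (position, counts, consensus sequence and statistics) from df_exon_adj onto df_exon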
def exon_adj_end_exchange(df_exon, df_exon_adj):
df_exon.pos_end = df_exon_adj.pos_end
df_exon.count_unalign_end = df_exon_adj.count_unalign_end
df_exon.count_exon_match_end = df_exon_adj.count_exon_match_end
df_exon.is_exon_match_end = df_exon_adj.is_exon_match_end
df_exon.consensus_seq_end = df_exon_adj.consensus_seq_end
df_exon.num_bad_reads_end = df_exon_adj.num_bad_reads_end
df_exon.num_bad_reads_ajust_end = df_exon_adj.num_bad_reads_ajust_end
df_exon.num_all_reads_end = df_exon_adj.num_all_reads_end
df_exon.num_all_reads_ajust_end = df_exon_adj.num_all_reads_ajust_end
df_exon.region_end = df_exon_adj.region_end
df_exon.bg_unalign_end = df_exon_adj.bg_unalign_end
df_exon.bg_total_end = df_exon_adj.bg_total_end
df_exon.bbinom_p_end = df_exon_adj.bbinom_p_end
df_exon.q_value_end = df_exon_adj.q_value_end
return(df_exon)
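# attempt to refine the exon start coordinate of df_exon using soft-clipped reads (and their mates when the BAM is paired-end)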
def exon_start_adj(df_exon):
if global_para.genome_bam_paired == True:
df_exon_return = df_exon.copy()
# get clipped reads in exon end position;
unalign_readlist_true = exon_start_adj_get_unalign(df_exon)
# get clipped mates read list;
unalign_readlist_mate = exon_adj_get_mate(unalign_readlist_true)
# get another boundary information
tmp_read_df = f_readlist_unalign_boundary_todf(unalign_readlist_mate,'start', df_exon.start)
tmp_read_df = tmp_read_df.query('pos<=@df_exon.start')
if len(tmp_read_df) == 0:
unalign_readlist = unalign_readlist_true + unalign_readlist_mate
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x <df_exon.start]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_start = unalign_readlist_pos[1]
df_exon_return = f_recount_unalign(df_exon_return,"start")
df_exon_return.abs_d_start = abs(df_exon_return.pos_start - df_exon_return.start)
else:
tmp_pos = tmp_read_df.pos.value_counts().sort_values(ascending = False).index[0]
df_exon_sub = df_exon.copy()
df_exon_sub.start = tmp_pos
df_exon_sub_t = df_exon_sub.to_frame().T
df_exon_sub_t_adj = f_df_exon_start_stat(df_exon_sub_t)
if len(df_exon_sub_t_adj) >=1:
df_exon_sub_t_adj = f_df_gene_start_stat_remove_dup(df_exon_sub_t_adj)
df_exon_sub_adj = df_exon_sub_t_adj.iloc[0]
if df_exon_sub_adj.bbinom_p_start < df_exon.bbinom_p_start and df_exon_sub_adj.bbinom_p_start <0.01:
df_exon_return = exon_adj_start_exchange(df_exon,df_exon_sub_adj)
else:
unalign_readlist = unalign_readlist_true + unalign_readlist_mate
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x <df_exon.start]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_start = unalign_readlist_pos[1]
df_exon_return = f_recount_unalign(df_exon_return,"start")
df_exon_return.abs_d_start = abs(df_exon_return.pos_start - df_exon_return.start)
else:
df_exon_return = df_exon.copy()
# get clipped reads in exon end position;
unalign_readlist_true = exon_start_adj_get_unalign(df_exon)
# get another boundary information
tmp_read_df = f_readlist_unalign_boundary_todf(unalign_readlist_true,'start', df_exon.start)
tmp_read_df = tmp_read_df.query('pos<=@df_exon.start')
if len(tmp_read_df) == 0:
unalign_readlist = unalign_readlist_true
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x <df_exon.start]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_start = unalign_readlist_pos[1]
df_exon_return = f_recount_unalign(df_exon_return,"start")
df_exon_return.abs_d_start = abs(df_exon_return.pos_start - df_exon_return.start)
else:
tmp_pos = tmp_read_df.pos.value_counts().sort_values(ascending = False).index[0]
df_exon_sub = df_exon.copy()
df_exon_sub.start = tmp_pos
df_exon_sub_t = df_exon_sub.to_frame().T
df_exon_sub_t_adj = f_df_exon_start_stat(df_exon_sub_t)
if len(df_exon_sub_t_adj)>=1:
df_exon_sub_t_adj = f_df_gene_start_stat_remove_dup(df_exon_sub_t_adj)
df_exon_sub_adj = df_exon_sub_t_adj.iloc[0]
if df_exon_sub_adj.bbinom_p_start < df_exon.bbinom_p_start and df_exon_sub_adj.bbinom_p_start <0.01:
df_exon_return = exon_adj_start_exchange(df_exon,df_exon_sub_adj)
else:
unalign_readlist = unalign_readlist_true
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x <df_exon.start]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_start = unalign_readlist_pos[1]
df_exon_return = f_recount_unalign(df_exon_return,"start")
df_exon_return.abs_d_start = abs(df_exon_return.pos_start - df_exon_return.start)
return(df_exon_return)
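# attempt to refine the exon end coordinate of df_exon using soft-clipped reads (and their mates when the BAM is paired-end)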
def exon_end_adj(df_exon):
if global_para.genome_bam_paired == True:
df_exon_return = df_exon.copy()
# get clipped reads in exon end position;
unalign_readlist_true = exon_end_adj_get_unalign(df_exon)
# get clipped mates read list;
unalign_readlist_mate = exon_adj_get_mate(unalign_readlist_true)
# get another boundary information
tmp_read_df = f_readlist_unalign_boundary_todf(unalign_readlist_mate,'end', df_exon.end)
tmp_read_df = tmp_read_df.query('pos>=@df_exon.end')
if len(tmp_read_df)>=1:
tmp_pos = tmp_read_df.pos.value_counts().sort_values(ascending = False).index[0]
df_exon_sub = df_exon.copy()
df_exon_sub.end = tmp_pos
df_exon_sub_t = df_exon_sub.to_frame().T
df_exon_sub_t_adj = f_df_exon_end_stat(df_exon_sub_t)
if len(df_exon_sub_t_adj)>=1:
df_exon_sub_t_adj = f_df_gene_end_stat_remove_dup(df_exon_sub_t_adj)
df_exon_sub_adj = df_exon_sub_t_adj.iloc[0]
if df_exon_sub_adj.bbinom_p_end < df_exon.bbinom_p_end and df_exon_sub_adj.bbinom_p_end <0.01:
df_exon_return = exon_adj_end_exchange(df_exon,df_exon_sub_adj)
else:
unalign_readlist = unalign_readlist_true + unalign_readlist_mate
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x >df_exon.end]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_end = unalign_readlist_pos[-2]
df_exon_return = f_recount_unalign(df_exon_return,"end")
df_exon_return.abs_d_end = abs(df_exon_return.pos_end - df_exon_return.end)
else:
unalign_readlist = unalign_readlist_true + unalign_readlist_mate
unalign_readlist_pos = [read.aend for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x >df_exon.end]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_end = unalign_readlist_pos[-2]
df_exon_return = f_recount_unalign(df_exon_return,"end")
df_exon_return.abs_d_end = abs(df_exon_return.pos_end - df_exon_return.end)
else:
df_exon_return = df_exon.copy()
# get clipped reads in exon end position;
unalign_readlist_true = exon_end_adj_get_unalign(df_exon)
# get another boundary information
tmp_read_df = f_readlist_unalign_boundary_todf(unalign_readlist_true,'end', df_exon.end)
tmp_read_df = tmp_read_df.query('pos>=@df_exon.end')
if len(tmp_read_df)>=1:
tmp_pos = tmp_read_df.pos.value_counts().sort_values(ascending = False).index[0]
df_exon_sub = df_exon.copy()
df_exon_sub.end = tmp_pos
df_exon_sub_t = df_exon_sub.to_frame().T
df_exon_sub_t_adj = f_df_exon_end_stat(df_exon_sub_t)
if len(df_exon_sub_t_adj)>=1:
df_exon_sub_t_adj = f_df_gene_end_stat_remove_dup(df_exon_sub_t_adj)
df_exon_sub_adj = df_exon_sub_t_adj.iloc[0]
if df_exon_sub_adj.bbinom_p_end < df_exon.bbinom_p_end and df_exon_sub_adj.bbinom_p_end <0.01:
df_exon_return = exon_adj_end_exchange(df_exon,df_exon_sub_adj)
else:
unalign_readlist = unalign_readlist_true
unalign_readlist_pos = [read.pos for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x >df_exon.end]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_end = unalign_readlist_pos[-2]
df_exon_return = f_recount_unalign(df_exon_return,"end")
df_exon_return.abs_d_end = abs(df_exon_return.pos_end - df_exon_return.end)
else:
unalign_readlist = unalign_readlist_true
unalign_readlist_pos = [read.aend for read in unalign_readlist]
unalign_readlist_pos = [x for x in unalign_readlist_pos if x >df_exon.end]
unalign_readlist_pos.sort()
if len(unalign_readlist_pos)>=2:
df_exon_return.pos_end = unalign_readlist_pos[-2]
df_exon_return = f_recount_unalign(df_exon_return,"end")
df_exon_return.abs_d_end = abs(df_exon_return.pos_end - df_exon_return.end)
return(df_exon_return)
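# for each element of list_input, record the length of its longest comma-separated segment (0 for non-string elements)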
def f_len_list(list_input):
len_list = list()
for x in list_input:
if type(x) == str:
len_list.append(max([len(x_single) for x_single in x.split(',')]))
else:
len_list.append(0)
return(len_list)
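# build a per-gene summary of exon-boundary clipped sequences and their BLASTN hits (filtered to >= 95% identity), then infer a source label for each gene via f_source_inference_single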
def f_source_inference(df_stat_all_region, table_blastn):
column_names = ["gene_name", "clipped_seq_left", "clipped_seq_right", "clipped_left_CDS_distance", "clipped_right_CDS_distance", "clipped_left_length", "clipped_right_length", "clipped_left_blastn", "clipped_right_blastn", "source_inference","first_choice"]
genelist = df_stat_all_region.gene_name.unique().tolist()
df_genelist = pd.DataFrame({'gene_name':genelist})
table_blastn_source = pd.DataFrame(columns = column_names,index = genelist);
table_blastn_source.gene_name = genelist
tmp_df_start = df_stat_all_region.query('is_exon_boundary_start=="1"')
tmp_df_end = df_stat_all_region.query('is_exon_boundary_end=="1"')
table_blastn_source.clipped_seq_left = df_genelist.merge(tmp_df_start.filter(['gene_name','clipped_seq_start']).groupby('gene_name')['clipped_seq_start'].apply(','.join).reset_index(), how = "left").clipped_seq_start.tolist()
table_blastn_source.clipped_seq_right = df_genelist.merge(tmp_df_end.filter(['gene_name','clipped_seq_end']).groupby('gene_name')['clipped_seq_end'].apply(','.join).reset_index(), how = "left").clipped_seq_end.tolist()
table_blastn_source.clipped_left_CDS_distance = df_genelist.merge(tmp_df_start.filter(['gene_name','abs_d_start']).groupby('gene_name')['abs_d_start'].apply(max).reset_index(), how = "left").abs_d_start.tolist()
table_blastn_source.clipped_right_CDS_distance = df_genelist.merge(tmp_df_end.filter(['gene_name','abs_d_end']).groupby('gene_name')['abs_d_end'].apply(max).reset_index(), how = "left").abs_d_end.tolist()
table_blastn_source.clipped_left_length = f_len_list(table_blastn_source.clipped_seq_left.tolist())
table_blastn_source.clipped_right_length = f_len_list(table_blastn_source.clipped_seq_right.tolist())
if len(table_blastn)==0:
pass;
else:
table_blastn = table_blastn.sort_values(['query_acc.ver','bit_score'],ascending = False).drop_duplicates(subset = 'query_acc.ver')
table_blastn = table_blastn.query('pct_identity>=95')
if len(table_blastn)==0:
pass
else:
table_blastn['gene_name'] = table_blastn.apply(lambda x:x["query_acc.ver"].split(":")[0],axis = 1)
table_blastn['which_boundary'] = table_blastn.apply(lambda x:x["query_acc.ver"].split(":")[1],axis = 1)
table_blastn['blastn_database'] = table_blastn.apply(lambda x:x["subject_acc.ver"].split(":")[0],axis = 1)
table_blastn['blastn_database_detail'] = table_blastn.apply(lambda x:x["subject_acc.ver"].split(":")[1],axis = 1)
table_blastn = pd.concat([table_blastn.query('blastn_database!="UTR"'),table_blastn.query('blastn_database=="UTR"').query('blastn_database_detail==gene_name')],sort = False)
table_blastn_source.clipped_left_blastn = df_genelist.merge(table_blastn.query('which_boundary=="start"').filter(['gene_name','blastn_database']),how = "left").blastn_database.tolist()
table_blastn_source.clipped_right_blastn = df_genelist.merge(table_blastn.query('which_boundary=="end"').filter(['gene_name','blastn_database']),how = "left").blastn_database.tolist()
table_blastn_source.first_choice = df_genelist.merge(table_blastn.sort_values(['gene_name',"bit_score"],ascending = False).drop_duplicates(subset = "gene_name"), how = "left").blastn_database.tolist()
table_blastn_source.source_inference = table_blastn_source.apply(lambda x:f_source_inference_single(x),axis = 1)
del(table_blastn_source['first_choice'])
return(table_blastn_source)
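# infer the source label for a single gene; starts from "unknown" and maps BLASTN database labels (vector, retro_element, UTR) to "vector"/"retrocopy"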
def f_source_inference_single(df_blastn_gene):
tmp_inference = "unknown"
tmp_blastn_database_lable = ['vector', 'retro_element', 'UTR']
tmp_blastn_database_result = ['vector', 'retrocopy', 'retrocopy']
tmp_blastn_database_dict = {tmp_blastn_database_lable[i]:tmp_blastn_database_result[i] for i in range(len(tmp_blastn_database_lable))}
if np.any([pd.isna(df_blastn_gene.clipped_left_CDS_distance), pd.isna(df_blastn_gene.clipped_right_CDS_distance)]):
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
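# The DataframeType operators below act element-wise on DataFrame columns and return boolean pandas Series; each test compares the returned Series with an expected one.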
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([True, True, True, True])))
def test_is_not_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([False, False, False, False])))
def test_is_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_not_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
one_to_one_related_df = pandas.DataFrame.from_dict(
{
"STUDYID": [1, 2, 3, 1, 2],
"USUBJID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"STUDYDESC": ["Russia", "USA", "China", "Russia", "USA", ],
}
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYID", "comparator": "STUDYDESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYDESC", "comparator": "STUDYID"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--ID", "comparator": "--DESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--DESC", "comparator": "--ID"}
).equals(pandas.Series([True, True, True, True, True]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"TESTID": [1, 2, 1, 3],
"TESTNAME": ["Functional", "Stress", "Functional", "Stress", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTID", "comparator": "TESTNAME"}).equals(pandas.Series([True, False, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTNAME", "comparator": "TESTID"}).equals(pandas.Series([True, False, True, False]))
)
def test_is_not_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
valid_df = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
}
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([False, False, False, False]))
)
valid_df_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": [
"Doctor Consultation", "Heart Surgery", "Doctor Consultation", "Long Lasting Treatment",
],
}
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Consulting", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([True, False, True, True]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([True, False, True, True]))
)
df_violates_one_to_one_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"VISIT": ["Consulting", "Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": ["Doctor Consultation", "Doctor Consultation", "Heart Surgery", "Heart Surgery", "Long Lasting Treatment", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SIT", "comparator": "--SITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SITDESC", "comparator": "--SIT"}).equals(pandas.Series([True, True, True, True, False]))
)
def test_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
self.assertFalse(
DataframeType({"value": df}).empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertTrue(
DataframeType({"value": df}).empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_non_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
self.assertTrue(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertFalse(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_is_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([True, False, True]))
)
def test_not_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, False]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([False, True, False]))
)
def test_is_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": pandas.Series([1,2,3]).values
},
"AE": {
"AETERM": pandas.Series([31, 323, 33]).values
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "DATA", "AETERM"],
"IDVARVAL1": [4, 1, 31],
"IDVARVAL2": [5, 1, 35]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR1", "comparator": "IDVARVAL1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR2", "comparator": "IDVARVAL2", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, False]))
)
def test_not_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": pandas.Series([1,2,3]).values
},
"AE": {
"AETERM": pandas.Series([31, 323, 33]).values
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "DATA", "AETERM"],
"IDVARVAL1": [4, 1, 31],
"IDVARVAL2": [5, 1, 35]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_relationship({"target": "IDVAR1", "comparator": "IDVARVAL1", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, False]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_relationship({"target": "IDVAR2", "comparator": "IDVARVAL2", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, True]))
)
def test_non_conformant_value_length(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def length_check(row):
return len(row["IDVAR2"]) <= 4
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", "TOOLONG", "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"length_check": length_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).non_conformant_value_length({})
self.assertTrue(result.equals(pandas.Series([False, True, False])))
def test_non_conformant_value_data_type(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def type_check(row):
return isinstance(row["IDVAR2"], str)
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", 1, "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"type_check": type_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).non_conformant_value_data_type({})
self.assertTrue(result.equals(pandas.Series([False, True, False])))
def test_conformant_value_length(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def length_check(row):
return len(row["IDVAR2"]) <= 4
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", "TOOLONG", "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"length_check": length_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).conformant_value_length({})
self.assertTrue(result.equals(pandas.Series([True, False, False])))
def test_conformant_value_data_type(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def type_check(row):
return isinstance(row["IDVAR2"], str)
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", 1, "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"type_check": type_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).conformant_value_data_type({})
self.assertTrue(result.equals(pandas.Series([True, False, False])))
def test_has_next_corresponding_record(self):
"""
Test for has_next_corresponding_record operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": valid_df}).has_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([True, True, True, pandas.NA, True, True, True, pandas.NA])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2010-08-03", "2008-08", "2006-06-17T10:20", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": invalid_df}).has_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([False, False, False, pandas.NA, True, True, True, pandas.NA])))
def test_does_not_have_next_corresponding_record(self):
"""
Test for does_not_have_next_corresponding_record operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": valid_df}).does_not_have_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([False, False, False, pandas.NA, False, False, False, pandas.NA])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2010-08-03", "2008-08", "2006-06-17T10:20", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": invalid_df}).does_not_have_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([True, True, True, pandas.NA, False, False, False, pandas.NA])))
def test_present_on_multiple_rows_within(self):
"""
Unit test for present_on_multiple_rows_within operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True])))
valid_df_1 = pandas.DataFrame.from_dict(
{
"USUBJID": [5, 5, 5, 7, 7, 7, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df_1}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 2}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, 3],
"SEQ": [1, 2, 3, 4, 5, 6, 7],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2", "AEHOSP3"]
}
)
result = DataframeType({"value": invalid_df}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True, False])))
def test_not_present_on_multiple_rows_within(self):
"""
Unit test for not_present_on_multiple_rows_within operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df}).not_present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([False, False, False, False, False, False])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, 3],
"SEQ": [1, 2, 3, 4, 5, 6, 7],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2", "AEHOSP3"]
}
)
result = DataframeType({"value": invalid_df}).not_present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([False, False, False, False, False, False, True])))
def test_additional_columns_empty(self):
"""
Unit test for additional_columns_empty operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": [None, None, "another value", None], # original column may be empty
"TSVAL1": ["value", "value", "value", None], # valid since TSVAL2 is also null in the same row
"TSVAL2": [None, "value 2", "value 2", None],
}
)
result = DataframeType({"value": valid_df, }).additional_columns_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([False, False, False, False, ])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", None, "value", "value"], # invalid column
"TSVAL2": ["value 2", "value 2", "value 2", None],
"TSVAL3": ["value 3", "value 3", None, "value 3"],
}
)
result = DataframeType({"value": invalid_df, }).additional_columns_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([False, True, False, True, ])))
def test_additional_columns_not_empty(self):
"""
Unit test for additional_columns_not_empty operator.
"""
df_with_empty_rows = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", None, "value", "value"],
"TSVAL2": ["value 2", "value 2", "value 2", "value 2"],
}
)
result = DataframeType({"value": df_with_empty_rows, }).additional_columns_not_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([True, False, True, True, ])))
df_without_empty_rows = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", "value", "value", "value"],
"TSVAL2": ["value 2", "value 2", "value 2", "value 2"],
}
)
result = DataframeType({"value": df_without_empty_rows, }).additional_columns_not_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([True, True, True, True, ])))
def test_references_valid_codelist(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_invalid_terms": ["C123", "C456", "C786"]
}
)
column_codelist_map = {
"TEST": ["C123", "C456"],
"COOLVAR": ["C123", "C456"],
"ANOTHERVAR": ["C789"]
}
dft = DataframeType({
"value": df,
"column_codelist_map": column_codelist_map
})
result = dft.references_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_controlled_terms"})
self.assertTrue(result.equals(pandas.Series([True, True, True ])))
bad_result = dft.references_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_invalid_terms"})
self.assertTrue(bad_result.equals(pandas.Series([True, True, False])))
def test_does_not_reference_valid_codelist(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_invalid_terms": ["C123", "C456", "C786"]
}
)
column_codelist_map = {
"TEST": ["C123", "C456"],
"--OLVAR": ["C123", "C456"],
"ANOTHERVAR": ["C789"]
}
dft = DataframeType({
"value": df,
"column_codelist_map": column_codelist_map,
"column_prefix_map": {
"--": "CO"
}
})
result = dft.does_not_reference_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_controlled_terms"})
self.assertTrue(result.equals(pandas.Series([False, False, False ])))
bad_result = dft.does_not_reference_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_invalid_terms"})
self.assertTrue(bad_result.equals(pandas.Series([False, False, True])))
def test_uses_valid_codelist_terms(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_allowed_terms": [
["A", "B"],
["C", "D"],
["E", "F"]
],
"define_variable_invalid_allowed_terms": [
["A", "L"],
["C", "Z"],
["E", "F"]
]
}
)
extensible_codelist_term_map = [{
"C123": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C"],
},
"C456": {
"extensible": True,
"allowed_terms": ["A", "B", "b", "C", "D"]
},
"C789": {
"extensible": False,
"allowed_terms": ["E", "F", "b", "C"]
}
}]
codelist_term_map = [{
"C123": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C"],
},
"C456": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C", "D"]
},
"C789": {
"extensible": False,
"allowed_terms": ["E", "F", "b", "C"]
}
}]
dft = DataframeType({
"value": df,
"codelist_term_maps": codelist_term_map
})
result = dft.uses_valid_codelist_terms({"target": "define_variable_controlled_terms", "comparator": "define_variable_allowed_terms"})
self.assertTrue(result.equals(pandas.Series([True, True, True])))
bad_result = dft.uses_valid_codelist_terms({"target": "define_variable_controlled_terms", "comparator": "define_variable_invalid_allowed_terms"})
self.assertTrue(bad_result.equals(pandas.Series([False, False, True])))
# Test extensible flag
dft = DataframeType({
"value": df,
"codelist_term_maps": extensible_codelist_term_map
})
result = dft.uses_valid_codelist_terms({"target": "define_variable_controlled_terms", "comparator": "define_variable_invalid_allowed_terms"})
self.assertTrue(result.equals(pandas.Series([False, True, True])))
def test_does_not_use_valid_terms(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_allowed_terms": [
["A", "B"],
["C", "D"],
["E", "F"]
],
"define_variable_invalid_allowed_terms": [
["A", "L"],
["C", "Z"],
["E", "F"]
]
}
)
extensible_codelist_term_map = [{
"C123": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C"],
},
"C456": {
"extensible": True,
"allowed_terms": ["A", "B", "b", "C", "D"]
},
"C789": {
"extensible": False,
"allowed_terms": ["E", "F", "b", "C"]
}
}]
codelist_term_map = [{
"C123": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C"],
},
"C456": {
"extensible": False,
"allowed_terms": ["A", "B", "b", "C", "D"]
},
"C789": {
"extensible": False,
"allowed_terms": ["E", "F", "b", "C"]
}
}]
dft = DataframeType({
"value": df,
"codelist_term_maps": codelist_term_map
})
result = dft.does_not_use_valid_codelist_terms({"target": "define_variable_controlled_terms", "comparator": "define_variable_allowed_terms"})
        self.assertTrue(result.equals(pandas.Series([False, False, False])))
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 13:38:28 2021
@author: <NAME>
"""
# # IP 2020-21: DEVELOPING A VPP FOR A NACRA 17
# ## 1) Aerodynamic module
# Import necessary packages to run the code:
# In[2]:
import math
import numpy as np
import pandas as pd
import time
from math import degrees as deg
from math import radians as rad
from math import cos as cos
from math import sin as sin
from math import atan as atan
from math import sqrt as sqrt
from scipy.optimize import minimize
from scipy.interpolate import interp1d
import scipy.interpolate as spi
import matplotlib.pyplot as plt
from matplotlib.pyplot import polar
from IPython.display import display
pd.set_option('display.max_rows', 40)
pd.set_option('display.max_columns', 20)
# ### Initial data (boat, environment...)
# In[3]:
LWL = 5.25 #m
rho_water = 1025 #Kg/m3
rho_air = 1.225 #Kg/m3
A_main = 14.45 #m²
A_jib = 4 #m²
A_upwind = A_main + A_jib
A_spi = 18.5 #m²
A_downwind = A_main + A_jib + A_spi
AR = 4.85 #aspect ratio of mainsail
k = 1/(math.pi*AR)
nu_air = 1.802e-5 #kg/m-s
nu_water = 1.1892e-6
v = 1.48e-5 #m²/s
RM_max = 7397.24 #N.m
PM_max = 4550 #N.m
g = 9.81 #m/s²
boat_weight = 163*g
crew_weight = 120*g
hull_form = 1.22
Aw_1hull = 1.914 #m²
Aw_2hulls = 3.828 #m²
# ### Aerodynamic sail coefficients, based on ORC VPP data
# In[4]:
Cl_main = [0,0.86207,1.05172,1.16379,1.34698,1.35345,1.26724,0.93103,0.38793,-0.11207]
Cd_main = [0.0431,0.02586,0.02328,0.02328,0.03259,0.11302,0.3825,0.96888,1.31578,1.34483]
Beta_main = [0,7,9,12,28,60,90,120,150,180]
a = np.array([Beta_main, Cl_main, Cd_main])
Cl_jib = [0,1,1.375,1.45,1.45,1.25,0.4,0,-0.1]
Cd_jib = [0.05,0.032,0.031,0.037,0.25,0.35,0.73,0.95,0.9]
Beta_jib = [7,15,20,27,50,60,100,150,180]
b = np.array([Beta_jib, Cl_jib, Cd_jib])
df_spi = pd.read_excel("Copy of Database.xlsx", "Asymmetric spinnaker on pole", engine='openpyxl')
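# The sail coefficient tables above are defined only at discrete apparent wind
# angles, so intermediate angles have to be interpolated. A minimal sketch of
# that step (my own illustration, not part of the original notebook; the
# 35-degree angle is an arbitrary example value):
Cl_main_curve = interp1d(Beta_main, Cl_main)
Cd_main_curve = interp1d(Beta_main, Cd_main)
beta_example = 35.0  # apparent wind angle in degrees
print('Cl_main, Cd_main at {} deg:'.format(beta_example),
      float(Cl_main_curve(beta_example)), float(Cd_main_curve(beta_example)))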
import importlib
import os
import pandas as pd
import logging
def func_from_string(key):
_mod = key.rsplit('.',1)[0]
_fun = key.rsplit('.',1)[1]
mod = importlib.import_module(_mod)
return getattr(mod, _fun)
def extract_best_model(config,metric='accuracy_score',ds='valid'):
modelpath = config['mpath']+'models/'
metricpath = config['mpath']+'metrics/'
results=pd.DataFrame()
for file in os.listdir(metricpath):
#print(file)
        _r = pd.read_csv(f"{metricpath}{file}", header=[0, 1, 2], index_col=[0], parse_dates=True)
###############################################################################
# Building the Model
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.externals import joblib
# import pickle
# opening the databases
train_df = pd.read_csv('data/train_data_modified.csv')
test_df = pd.read_csv('data/test_data_modified.csv')
import altair as alt
import pandas as pd
import numpy as np
from datetime import date, timedelta
from os import path
from io import StringIO
from flask import current_app as app
import redis, requests, time, pyarrow
def connect():
return redis.Redis( host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'] )
#
# A simple label placement algorithm, to avoid overlaps
#
# A conventional approach might use scipy.optimize, but I want
# to avoid dependence on something so big as scipy just for this one
# purpose.
#
class label_placement:
def __init__(self, xcol, ycol):
xy = np.column_stack( [xcol.to_numpy(), ycol.to_numpy()] )
dxy = (xy.max(axis=0) - xy.min(axis=0))/40
r0 = xy/dxy
#
# Default starts above mark
#
phi = np.array([np.pi/2]*len(r0))
#
# Push away from closest neighbor
#
i0, i1 = np.triu_indices(r0.shape[0], k=1)
r = r0 + [0,1] # = [cos(phi),sin(phi)]
for attempt in range(0,100):
#
# Find worst collision
#
id2 = np.argmin(((r[i0,:]-r[i1,:])**2).sum(axis=1))
t0 = i0[id2]
t1 = i1[id2]
#
# We're finished if the collision isn't too bad
#
dr = r[t0,:] - r[t1,:]
if (dr**2).sum() > 2: break
#
# Move each by delta in phi
#
if np.cos(phi[t0]) * dr[1] - np.sin(phi[t0]) * dr[0] > 0:
phi[t0] += np.pi/6
else:
phi[t0] -= np.pi/6
r[t0,:] = r0[t0,:] + [np.cos(phi[t0]), np.sin(phi[t0])]
if np.cos(phi[t1]) * dr[1] - np.sin(phi[t1]) * dr[0] < 0:
phi[t1] += np.pi/6
else:
phi[t1] -= np.pi/6
r[t1,:] = r0[t1,:] + [np.cos(phi[t1]), np.sin(phi[t1])]
self.xy = r * dxy
def X(self):
return self.xy[:,0]
def Y(self):
return self.xy[:,1]
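#
# A minimal usage sketch for label_placement (illustration only; the sample
# coordinates below are assumptions, not data from this app). Each input column
# is one coordinate of the marks being labelled; X()/Y() return offset positions
# that push neighbouring labels apart.
#
if __name__ == "__main__":
    _demo = pd.DataFrame({"x": [1.0, 1.1, 5.0, 5.05], "y": [2.0, 2.05, 3.0, 3.02]})
    _lp = label_placement(_demo["x"], _demo["y"])
    print(list(zip(_lp.X(), _lp.Y())))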
def fetchState(rconn,key):
context = pyarrow.default_serialization_context()
#
# Check date of main dataframe
#
expires = rconn.hget("state"+key,"expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("state"+key,"dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
req = requests.get(
"https://data.cdc.gov/resource/9mfq-cb36.csv",
params={
'state': key,
'$limit': 5000,
'$select': "submission_date,state,new_case,new_death",
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["submission_date"]).rename(columns={
'submission_date': 'dt'
})
answer = answer.sort_values('dt')
#
# Save
#
rconn.hset("state"+key,"dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("state"+key,"expires",str(time.time()+600.0))
return answer
def fetchRecent(rconn):
context = pyarrow.default_serialization_context()
#
# Check date of main dataframe
#
expires = rconn.hget("staterecent","expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("staterecent","dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
# Fetch starting from 10 days ago, to ensure we get at least seven
#
start = date.today() - timedelta(days=11)
req = requests.get(
"https://data.cdc.gov/resource/9mfq-cb36.csv",
params={
'$where': "submission_date > '{:4d}-{:02d}-{:02d}'".format(start.year,start.month,start.day),
'$limit': 5000,
'$select': "submission_date,state,new_case,new_death",
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["submission_date"]).rename(columns={
'submission_date': 'dt'
})
#
# We actually get some odd "states". Let's remove them.
#
namefile = path.join(app.config['DATA_DIR'],"state-abbre.csv")
    valid = pd.read_csv(namefile)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
from pandas.compat import lrange, range
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
def test_get():
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
idx = [2, np.nan]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
# GH 17295 - all missing keys
idx = [20, 30]
assert(s.get(idx) is None)
idx = [np.nan, np.nan]
assert(s.get(idx) is None)
def test_delitem():
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
    s = Series()
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:46:24 2018
@author: nmei
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import seaborn as sns
import numpy as np
from statsmodels.formula.api import ols#,mixedlm
from statsmodels.stats.anova import anova_lm
from utils import eta_squared,omega_squared,resample_ttest_2sample,MCPConverter
from itertools import combinations
sns.set_style('whitegrid')
sns.set_context('poster')
saving_dir = '../figures/'
df_dir = '../results/for_spss'
def post_processing(df):
feature_names = [name for name in df.columns if 'coef' in name] # the feature names with "coef" in them
feature_name_wk = feature_names[1:] # take the intercept out
working_df = df[feature_name_wk] #
for name in feature_name_wk:
working_df[name] = working_df[name].apply(np.exp)
new_col_names = {name:name[:-5] for name in feature_name_wk}
working_df['model'] = 'logistic'
working_df['window'] = df['window']
working_df = working_df.rename(new_col_names,axis='columns')
df_plot = pd.melt(working_df,id_vars = ['model','window'],
value_vars = new_col_names.values())
df_plot.columns = ['Model','Window','Coefficients','Odd_Ratio']
return df_plot
def thresholding(value):
if value < 0.001:
return "***"
elif value < 0.01:
return "**"
elif value < 0.05:
return "*"
else:
return "ns"
def preparation(c):
df_temp = {}
for ((window,feature),df_sub) in c.groupby(['Window','Coefficients']):
df_temp['{}_win{}_{}'.format('logistic',window,feature)] = df_sub['Odd_Ratio'].values
df_temp = pd.DataFrame(df_temp)
return df_temp
"""
Take the exponential of each of the coefficients to generate the odds ratios.
This tells you how a 1 unit increase or decrease in a variable affects the odds of being high POS.
"""
if __name__ == '__main__':
results = []
aov_tables = []
##########################################################################################################################################
pos = pd.read_csv('../results/pos_logistic_statsmodel_6_features.csv')
att = pd.read_csv('../results/att_logistic_statsmodel_6_features.csv')
df = pos.copy()
df_plot = post_processing(df) # process the dataframe with melt or sth
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)] # get the window
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'pos,6 features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
######
df = att.copy()
df_plot = post_processing(df)
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)]
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'att,6 features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
###################################################################################
################### 3 judgement features #########################################
###################################################################################
    pos = pd.read_csv('../results/pos_logistic_statsmodel_3_1_features.csv')
"""
Contributions from:
DSEverything - Mean Mix - Math, Geo, Harmonic (LB 0.493)
https://www.kaggle.com/dongxu027/mean-mix-math-geo-harmonic-lb-0-493
JdPaletto - Surprised Yet? - Part2 - (LB: 0.503)
https://www.kaggle.com/jdpaletto/surprised-yet-part2-lb-0-503
hklee - weighted mean comparisons, LB 0.497, 1ST
https://www.kaggle.com/zeemeen/weighted-mean-comparisons-lb-0-497-1st
Also all comments for changes, encouragement, and forked scripts rock
Keep the Surprise Going
"""
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
from xgboost import XGBRegressor
data = {
'tra': pd.read_csv('../input/air_visit_data.csv'),
'as': pd.read_csv('../input/air_store_info.csv'),
'hs': pd.read_csv('../input/hpg_store_info.csv'),
'ar': pd.read_csv('../input/air_reserve.csv'),
'hr': pd.read_csv('../input/hpg_reserve.csv'),
'id': pd.read_csv('../input/store_id_relation.csv'),
'tes': pd.read_csv('../input/sample_submission.csv'),
'hol': pd.read_csv('../input/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf"
)
tables = camelot.read_pdf(filename, pages="2")
assert_frame_equal(df, tables[0].df)
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
def test_lattice_two_tables():
df1 = pd.DataFrame(data_lattice_two_tables_1)
df2 = pd.DataFrame(data_lattice_two_tables_2)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename)
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_lattice_table_regions():
df = pd.DataFrame(data_lattice_table_regions)
filename = os.path.join(testdir, "table_region.pdf")
tables = camelot.read_pdf(filename, table_regions=["170,370,560,270"])
assert_frame_equal(df, tables[0].df)
def test_lattice_table_areas():
df = pd.DataFrame(data_lattice_table_areas)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename, table_areas=["80,693,535,448"])
assert_frame_equal(df, tables[0].df)
def test_lattice_process_background():
df = pd.DataFrame(data_lattice_process_background)
filename = os.path.join(testdir, "background_lines_1.pdf")
tables = camelot.read_pdf(filename, process_background=True)
assert_frame_equal(df, tables[1].df)
def test_lattice_copy_text():
df = pd.DataFrame(data_lattice_copy_text)
filename = os.path.join(testdir, "row_span_1.pdf")
tables = camelot.read_pdf(filename, line_scale=60, copy_text="v")
assert_frame_equal(df, tables[0].df)
def test_lattice_shift_text():
    df_lt = pd.DataFrame(data_lattice_shift_text_left_top)
import pandas as pd
from utility import *
def filter(df):
header = list(df)
if len([s for s in header if 'W1' in s]) > 0:
return df.drop(['mode', 'stage', 'time', 'win',
'W1-weapon', 'W1-kill-assist', 'W1-kill', 'W1-assist', 'W1-death', 'W1-special', 'W1-inked', 'W1-level',
'W2-weapon', 'W2-kill-assist', 'W2-kill', 'W2-assist', 'W2-death', 'W2-special', 'W2-inked', 'W2-level',
'W3-weapon', 'W3-kill-assist', 'W3-kill', 'W3-assist', 'W3-death', 'W3-special', 'W3-inked', 'W3-level',
'W4-weapon', 'W4-kill-assist', 'W4-kill', 'W4-assist', 'W4-death', 'W4-special', 'W4-inked', 'W4-level',
'L1-weapon', 'L1-kill-assist', 'L1-kill', 'L1-assist', 'L1-death', 'L1-special', 'L1-inked', 'L1-level',
'L2-weapon', 'L2-kill-assist', 'L2-kill', 'L2-assist', 'L2-death', 'L2-special', 'L2-inked', 'L2-level',
'L3-weapon', 'L3-kill-assist', 'L3-kill', 'L3-assist', 'L3-death', 'L3-special', 'L3-inked', 'L3-level',
'L4-weapon', 'L4-kill-assist', 'L4-kill', 'L4-assist', 'L4-death', 'L4-special', 'L4-inked', 'L4-level'], axis=1)
else:
return df.drop(['mode', 'stage', 'time', 'win',
'A1-weapon', 'A1-kill-assist', 'A1-kill', 'A1-assist', 'A1-death', 'A1-special', 'A1-inked', 'A1-level',
'A2-weapon', 'A2-kill-assist', 'A2-kill', 'A2-assist', 'A2-death', 'A2-special', 'A2-inked', 'A2-level',
'A3-weapon', 'A3-kill-assist', 'A3-kill', 'A3-assist', 'A3-death', 'A3-special', 'A3-inked', 'A3-level',
'A4-weapon', 'A4-kill-assist', 'A4-kill', 'A4-assist', 'A4-death', 'A4-special', 'A4-inked', 'A4-level',
'B1-weapon', 'B1-kill-assist', 'B1-kill', 'B1-assist', 'B1-death', 'B1-special', 'B1-inked', 'B1-level',
'B2-weapon', 'B2-kill-assist', 'B2-kill', 'B2-assist', 'B2-death', 'B2-special', 'B2-inked', 'B2-level',
'B3-weapon', 'B3-kill-assist', 'B3-kill', 'B3-assist', 'B3-death', 'B3-special', 'B3-inked', 'B3-level',
'B4-weapon', 'B4-kill-assist', 'B4-kill', 'B4-assist', 'B4-death', 'B4-special', 'B4-inked', 'B4-level'], axis=1)
def get_item_list():
return ['kill', 'assist', 'death', 'special', 'inked', 'level']
def mean_stat(df):
m = df.mean()
print(m)
header = list(df)
if len([s for s in header if 'W1' in s]) > 0:
for item in get_item_list():
delta = m['WT-'+item] - m['LT-'+item]
print(item + ': ' + str(delta))
else:
for item in get_item_list():
delta = m['AT-'+item] - m['BT-'+item]
print(item + ': ' + str(delta))
def median_stat(df):
m = df.median()
print(m)
header = list(df)
if len([s for s in header if 'W1' in s]) > 0:
for item in get_item_list():
delta = m['WT-'+item] - m['LT-'+item]
print(item + ': ' + str(delta))
else:
for item in get_item_list():
            delta = m['AT-'+item] - m['BT-'+item]
print(item + ': ' + str(delta))
def mean_team(df, kind):
header = list(df)
if len([s for s in header if 'W1' in s]) > 0:
df['WT-'+kind] = (df['W1-'+kind] + df['W2-'+kind] +
df['W3-'+kind] + df['W4-'+kind]) / 4
df['LT-'+kind] = (df['L1-'+kind] + df['L2-'+kind] +
df['L3-'+kind] + df['L4-'+kind]) / 4
else:
df['AT-'+kind] = (df['A1-'+kind] + df['A2-'+kind] +
df['A3-'+kind] + df['A4-'+kind]) / 4
df['BT-'+kind] = (df['B1-'+kind] + df['B2-'+kind] +
df['B3-'+kind] + df['B4-'+kind]) / 4
return df
def stat_team_data(df):
team_index = ['A', 'B']
header = list(df)
if len([s for s in header if 'W1' in s]) > 0:
team_index = ['W', 'L']
# for category in ['kill']:
for category in ['kill', 'assist', 'death', 'special', 'inked', 'level']:
x, y = extract_team_data(df, team_index, category)
df[team_index[0]+'T-'+category+'-mean'] = x.mean(axis=1)
df[team_index[1]+'T-'+category+'-mean'] = y.mean(axis=1)
df[team_index[0]+'T-'+category+'-median'] = x.median(axis=1)
df[team_index[1]+'T-'+category+'-median'] = y.median(axis=1)
df[team_index[0]+'T-'+category+'-max'] = x.max(axis=1)
df[team_index[1]+'T-'+category+'-max'] = y.max(axis=1)
df[team_index[0]+'T-'+category+'-min'] = x.min(axis=1)
df[team_index[1]+'T-'+category+'-min'] = y.min(axis=1)
df[team_index[0]+'T-'+category+'-std'] = x.std(axis=1)
df[team_index[1]+'T-'+category+'-std'] = y.std(axis=1)
df[team_index[0]+'T-'+category+'-var'] = x.var(axis=1)
df[team_index[1]+'T-'+category+'-var'] = y.var(axis=1)
df[team_index[0]+'T-'+category +
'-delta'] = x.mean(axis=1) - y.mean(axis=1)
df = df.drop([team_index[0]+'1-'+category, team_index[0]+'2-'+category,
team_index[0]+'3-'+category, team_index[0]+'4-'+category,
team_index[1]+'1-'+category, team_index[1]+'2-'+category,
team_index[1]+'3-'+category, team_index[1]+'4-'+category], axis=1)
df = df.drop([team_index[0]+'1-kill-assist', team_index[0]+'2-kill-assist',
team_index[0]+'3-kill-assist', team_index[0]+'4-kill-assist',
team_index[1]+'1-kill-assist', team_index[1]+'2-kill-assist',
team_index[1]+'3-kill-assist', team_index[1]+'4-kill-assist'], axis=1)
return df
def stat_table(df):
mean = df.mean().reset_index(name='mean')
median = df.median().reset_index(name='median')
max = df.max().reset_index(name='max')
min = df.min().reset_index(name='min')
std = df.std().reset_index(name='std')
var = df.var().reset_index(name='var')
stat_df = pd.merge(mean, median)
stat_df = pd.merge(stat_df, max)
stat_df = pd.merge(stat_df, min)
stat_df = pd.merge(stat_df, std)
    stat_df = pd.merge(stat_df, var)
import pandas as pd
import json
from datetime import datetime, timedelta
import plotly.graph_objs as go
class BaseDataFrameManipulation(object):
def __init__(self, df):
self.df = self.transform_date(
self.transpose_and_remove_index(self.drop_fields(df)))
# drop information that arent important for the first plots
def drop_fields(self, df):
return df.drop(['Province/State', 'Lat', 'Long', 'Country/Region'], axis=1)
def transpose_and_remove_index(self, df):
return df.transpose().reset_index()
    # the date column must be converted from string to datetime
def transform_date(self, df):
df.columns = ['date', 'cases']
        df['date'] = pd.to_datetime(df['date'], format='%m/%d/%y')
        return df
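# Usage sketch (my own illustration; the expected input layout is assumed from the
# columns dropped above, i.e. a Johns-Hopkins-style time series already reduced to
# a single location/row):
#   raw = pd.read_csv("confirmed_cases.csv")        # hypothetical file name
#   wrangled = BaseDataFrameManipulation(raw).df    # -> columns: 'date', 'cases'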
import networkx as nx
import pandas as pd
import sparql
import logging
from tabulate import tabulate
import nltk
import re
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
from nltk.corpus import wordnet
class QueryGenerator():
def generate(self, question, graph):
question = question.lower()
# query head and tail
variable = self.__get_target_variable(graph)
(head, tail, constraints) = self.__generate_head_and_tail(question, variable)
# query body
body = self.__generate_body(graph)
# assemble query
query = head + body + tail
return query, constraints
def ask(self, graph, entities, query, constraints, endpoint='http://dbpedia.org/sparql'):
logging.debug("Ask query:")
logging.debug(query)
try:
results=sparql.query(endpoint, query)
except Exception as e:
logging.error(f"Exception {e} on query:\n\n{query}")
raise e
if 'answer-type' in constraints:
answer = results.hasresult()
for node in graph.nodes:
if graph.nodes[node]['label'] in entities:
entities.remove(graph.nodes[node]['label'])
if len(entities) > 0:
answer = False
        return pd.DataFrame([answer], columns=['Answers'])
"""
Project: Student performance analysis
"""
# Import necessary libraries and load data sets to pandas DataFrame.
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import LinearSVR
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn import neighbors
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from warnings import filterwarnings
filterwarnings('ignore')
# Load data for subject "Math".
math = pd.read_csv('student/student-mat.csv', sep=";")
print(math.shape) # (395, 33)
# add an extra column ['subject] to math DataFrame.
math.insert(1, 'subject', ['math'] * 395)
# Load data for subject "Portuguese".
portuguese = pd.read_csv('student/student-por.csv', sep=";")
print(portuguese.shape) # (649, 33)
# add an extra column ['subject] to portuguese dataframe.
portuguese.insert(1, 'subject', ['por'] * 649)
# Concatenate both DataFrame vertically
students = pd.concat([math, portuguese])
# Check and make sure the concatenation is correct
assert math.shape[0] + portuguese.shape[0] == students.shape[0], 'merge error'
assert math.shape[1] == portuguese.shape[1] == students.shape[1], 'merge error'
# Check the DataFrame
print(students.shape)
print(students.head()) # column 'subject' has been inserted.
print(students.describe())
print(students.info())  # No data is missing, but some columns' data types are object; data cleansing is needed before ML.
# Sort out all the column names with data type object
text_columns = []
dataTypeDict = dict(students.dtypes)
for col in dataTypeDict:
if dataTypeDict[col] == 'O':
text_columns.append(col)
print(text_columns)
# convert all the two-answer categorical features to integers (Mjob, Fjob, reason and guardian need the
# one-hot-encoding method to be converted into numerical data)
students['school'] = students['school'].map({'GP': 0, "MS": 1})
students['subject'] = students['subject'].map({'math': 0, "por": 1})
students['sex'] = students['sex'].map({'F': 0, "M": 1})
students['address'] = students['address'].map({'U': 0, "R": 1})
students['famsize'] = students['famsize'].map({'GT3': 0, "LE3": 1})
students['Pstatus'] = students['Pstatus'].map({'A': 0, "T": 1})
students['schoolsup'] = students['schoolsup'].map({'no': 0, "yes": 1})
students['famsup'] = students['famsup'].map({'no': 0, "yes": 1})
students['paid'] = students['paid'].map({'no': 0, "yes": 1})
students['activities'] = students['activities'].map({'no': 0, "yes": 1})
students['nursery'] = students['nursery'].map({'no': 0, "yes": 1})
students['higher'] = students['higher'].map({'no': 0, "yes": 1})
students['internet'] = students['internet'].map({'no': 0, "yes": 1})
students['romantic'] = students['romantic'].map({'no': 0, "yes": 1})
# Recheck the dtypes
print(students.info())
# Data visualization
os.makedirs('plots/visual', exist_ok=True)
os.makedirs('plots/ML', exist_ok=True)
# Plotting the heatmap (missing Mjob, Fjob, reason, guardian)
_, ax = plt.subplots(figsize=(25, 25))
cmap = sns.diverging_palette(220, 10, as_cmap=True) # color map
# Numpy’s tril() function to extract Lower Triangle Matrix
df_lt = students.corr().where(np.tril(np.ones(students.corr().shape)).astype(np.bool))
# plotting the heatmap
sns.heatmap(data=df_lt,
cmap=cmap,
square=True,
cbar_kws={'shrink': .6},
annot=True,
annot_kws={'fontsize': 10},
ax=ax
)
plt.savefig(f'plots/visual/heatmap.png')
plt.close()
"""
Interesting findings of Heatmap:
1. G1, G2 and failures are the 3 features most strongly related to the final grade G3.
2. Mother's education is strongly related to father's education.
3. Besides G1, G2 and failures, the other top-10 influence factors on the target G3 are: higher: 0.24,
Medu: 0.2, Fedu: 0.16, studytime: 0.16, age: -0.13, Dalc: -0.13, address: -0.12, Walc: -0.12, internet: 0.11, traveltime: -0.1.
4. Walc and goout are highly correlated.
5. Traveltime and address are highly correlated.
6. Paid and subject are strongly negatively correlated.
7. Internet has a positive relation to the target G3.
8. sex, Pstatus, schoolsup, famsup, paid, nursery, romantic, famrel, health, absences: surprisingly, the grade is not
much related to these factors, which is contrary to our usual perception that family support, school support or paid
extra classes should greatly help to improve grades. Likewise, the things we usually worry about (early romance, poor
health and frequent absences) do not really lead to a decline in grades.
So let's visualize these findings by plotting them :)
"""
# countplot to review the G3 distribution
plt.figure(figsize=(10, 6))
sns.set()
sns.countplot('G3', data=students, palette="ch:2.5,-.2,dark=.3")
plt.title('Grade distribution')
plt.xlabel('final grade')
plt.savefig(f'plots/visual/G3_countplot.png')
plt.close()
# The plot above shows that the students' grades follow a roughly normal distribution.
# However, there are rather too many students whose grade is exactly 0. It might be because of cheating during the exam.
# We think the students whose grade is 0 should be removed.
# let's class our grades (high:>=15, mid:8-14, low:<=7)
high = students.loc[students['G3'] >= 15].count()[0]
medium = students.loc[(students['G3'] >= 8) & (students['G3'] <= 14)].count()[0]
low = students.loc[students['G3'] <= 7].count()[0]
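# The same 3-way banding can also be expressed with pd.cut (a sketch; the bin edges are an
# assumption chosen to match the thresholds above, with G3 capped at 20):
grade_band = pd.cut(students['G3'], bins=[-1, 7, 14, 20], labels=['low', 'medium', 'high'])
print(grade_band.value_counts())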
# pieplot
plt.figure(figsize=(10, 6))
labels = ['high grades > = 15', 'Medium grades 8-14', 'low grades <= 7']
colors = ['#abcdef', '#aabbcc', '#67757a']
plt.pie([high, medium, low], labels=labels, colors=colors, autopct='%1.1f%%', shadow=False)
plt.title('Grades 3-classes of math and portuguese')
plt.savefig(f'plots/visual/G3_pieplot.png')
plt.close()
# lineplot on G1/G2/failures to G3
fig, ax = plt.subplots(3, 1, figsize=(10, 9))
sns.set()
index = 0
for col in ['G1', 'G2', 'failures']:
sns.lineplot(col, 'G3', data=students, ax=ax[index])
ax[index].set_title(col + ' to final grades')
ax[index].set_xlabel(col)
ax[index].set_ylabel('final Grade')
index += 1
fig.tight_layout(pad=3.0)
plt.savefig(f'plots/visual/G3_lineplot.png')
plt.close()
# These 3 plots demonstrate that: Students' academic performance continues to be stable, which means students with
# good results will continue to perform well and vice versa.
# barplot for Medu and Fedu
new_Pstatus = []
for each_status in students['Pstatus']:
if each_status == 0:
new_Pstatus.append('Apart')
else:
new_Pstatus.append('Together')
students['NPstatus'] = new_Pstatus
plt.figure(figsize=(11, 8))
sns.set()
labels = ['Apart', 'Together']
sns.barplot('Medu', 'Fedu', hue='NPstatus', data=students, palette="Blues_d")
plt.title("Mother's education vs. Father's eduction")
plt.xlabel('Mother education level')
plt.ylabel('Father education level')
plt.xticks(np.arange(5), ('no education', 'primary(1st-4th)', 'primary(5th-9th)', 'secondary', 'university and above'))
plt.yticks(np.arange(5), ('no education', 'primary(1st-4th)', 'primary(5th-9th)', 'secondary', 'university and above'))
plt.legend()
plt.savefig(f'plots/visual/Medu_Fedu_barplot.png')
plt.close()
# This plot shows that people tend to marry partners with a similar education background, perhaps because they have
# more interests in common. The divorce rate is almost 50% in each group. The no-education group has a very small
# sample and is not representative.
# Line plot
sorted_by_studytime_df = students.sort_values('studytime')
plt.figure(figsize=(12, 8))
sns.set()
sns.lineplot('studytime', 'G3', hue='sex', data=sorted_by_studytime_df)
plt.xlabel('studytime (hours/week)')
plt.ylabel('Grade')
plt.xticks([1, 2, 3, 4], ('less than 2h', '2-5hrs', '5-10hrs', 'more than 10hrs'))
plt.legend(labels=['Female', 'Male'])
plt.title('Studytime on final grade')
plt.savefig(f'plots/visual/studytime_lineplot.png')
plt.close()
# The plot above shows that for female students, the more study time spent, the better the grade. For male students
# the grade also increases with study time, but once the total weekly study time exceeds 10 hours, their grades
# decline.
# Scatter plot
plt.style.use("bmh")
fig, axes = plt.subplots(1, 1, figsize=(10, 10))
axes.scatter(students['goout'], students['G3'], alpha=0.3, s=students[['Walc'] + ['Dalc']] ** 4,
label='alcohol consumption')
axes.set_xlabel('Low <----going out with friends----> High')
axes.set_ylabel('Grade')
axes.set_title('Going out time on grade\n')
axes.legend()
plt.savefig(f'plots/visual/goout_scatterplot.png')
plt.close()
# From this plot we can see that the students who rarely go out have the smallest fluctuations in grades, but they are
# not the best-performing group. The best group goes out only a little, still spending some time with friends. In all
# groups, the top-grade students have almost no alcohol consumption.
# swarm plot
sns.set()
plt.figure(figsize=(12, 8))
sns.swarmplot('traveltime', 'G3', data=students, hue='address', size=10)
plt.xlabel('Travel time from home to school')
plt.ylabel('Grade')
plt.xticks([0, 1, 2, 3], ('<15 min', '15-30 min', '30 min. to 1 hour', '>1 hour'))
plt.title('Travel time on grade\n')
plt.legend(labels=['Urban', 'Rural'])
plt.savefig(f'plots/visual/traveltime_swarmplot.png')
plt.close()
# From this plot we can see that the students who spend more time travelling to school have lower grades. Most of
# the students live near the school, and students living in rural areas normally have longer travel times than
# students living in urban areas.
# comparisons
_, ax = plt.subplots(3, 2, figsize=(12, 12))
sns.set()
# First row of subplots
# Compare the percentage of extra pay on Math and Portuguese
sns.countplot('paid', data=students.loc[students['subject'] == 0], ax=ax[0][0])
ax[0][0].set_title('Extra pay on Math')
sns.countplot('paid', data=students.loc[students['subject'] == 1], ax=ax[0][1])
ax[0][1].set_title('Extra pay on Portuguese')
# Second row of subplots
# Compare Female and Male students performance on each subject
sns.boxplot('sex', 'G3', data=students.loc[students['subject'] == 0], ax=ax[1][0])
ax[1][0].set_title('G3 comparison by sex on Math')
sns.boxplot('sex', 'G3', data=students.loc[students['subject'] == 1], ax=ax[1][1])
ax[1][1].set_title('G3 comparison by sex on Portuguese')
# Third row of subplots
# Compare Mother's job and Father's job to students grade
sns.boxplot('Mjob', 'G3', data=students, ax=ax[2][0])
ax[2][0].set_title("G3 comparison by mother's job")
sns.boxplot('Fjob', 'G3', data=students, ax=ax[2][1])
ax[2][1].set_title("G3 comparison by father's job")
fig.tight_layout(pad=3.0)
plt.savefig(f'plots/visual/comparisons_plot.png')
plt.close()
# The plots above show:
# 1. For extra classes, parents pay more for Math and very little for Portuguese.
# 2. Male students perform better in Math and female students perform better in Portuguese.
# 3. Kids whose mother has a health-related job have the best performance, and kids whose father has a
# teaching-related job have the best performance.
# Regplots to see how three other features relate to the final grade (age / willingness to take higher education / internet availability)
fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize=(15, 5))
sns.regplot(x='age', y='G3', data=students, ax=axis1)
sns.regplot(x='higher', y='G3', data=students, ax=axis2)
sns.regplot(x='internet', y='G3', data=students, ax=axis3)
fig.tight_layout(pad=2.0)
plt.savefig(f'plots/visual/regplots.png')
plt.close()
# The 3 plots above show:
# 1. Grades decline as age increases, which matches our usual intuition:
# the higher the school year, the more difficult the material.
# 2. Students who want to pursue higher education achieve better study performance.
# 3. Internet access doesn't hurt learning; it actually helps academic performance.
# Data cleansing
"""
Since there are still 4 features whose dtype is object, we have to convert them to a numerical data type.
Because these features have no ordinal relationship, we use the one-hot-encoding method
in pandas to convert the categorical data to numerical data.
"""
# use pd.concat to join the new columns with original students dataframe and drop the original 'Mjob' column
students = pd.concat([students, pd.get_dummies(students['Mjob'],
prefix='Mjob', dummy_na=False)], axis=1).drop(['Mjob'], axis=1)
# use pd.concat to join the new columns with students dataframe and drop the original 'Fjob' column
students = pd.concat([students, pd.get_dummies(students['Fjob'],
prefix='Fjob', dummy_na=False)], axis=1).drop(['Fjob'], axis=1)
# use pd.concat to join the new columns with students dataframe and drop the original 'reason' column
students = pd.concat([students, pd.get_dummies(students['reason'],
prefix='reason', dummy_na=False)], axis=1).drop(['reason'], axis=1)
# use pd.concat to join the new columns with students dataframe and drop the original 'guardian' column
students = pd.concat([students, pd.get_dummies(students['guardian'],
prefix='guardian', dummy_na=False)], axis=1).drop(['guardian'], axis=1)
# Check one-hot-encoding is applied correctly.
print(students.columns)
# need to remove 'NPstatus' - added for plotting purpose only
students.drop(['NPstatus'], axis=1, inplace=True)
print(students.shape)
# need to remove student samples whose G3 is 0
students = students.loc[students['G3'] != 0]
print(students.shape)
print(students.info())  # Data cleansing is done. No data is missing and all columns are numerical.
# Machine Learning - 1. Predict students' final grade (Regression)
# We're going to build a model to estimate students' final scores for the two subjects (Math and Portuguese)
# from various features of the student. The scores produced are numbers between 0 and 20, where higher scores indicate
# better study performance.
def xyz(x,y):
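    # Split the features/target into train and test sets, benchmark several regressors with
    # cross-validation and print their error metrics, then return the split so the chosen
    # estimator can be tuned further below.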
# Splitting features and target datasets into: train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.35)
# print(f"x.shape: (x.shape), y.shape: (y.shape)")
# print(f"x_train.shape: {x_train.shape}, y_train.shape: {y_train.shape}")
# print(f"x_test.shape: {x_test.shape}, y_test.shape: {y_test.shape}")
# return x_train, x_test, y_train, y_test
for Model in [LinearRegression, LinearSVR, Ridge, ElasticNet, Lasso, GradientBoostingRegressor]:
model = Model()
model.fit(x_train, y_train)
predicted_values = model.predict(x_test)
print(f"{Model}: {Model.__name__, cross_val_score(Model(), x, y).mean()}")
print(f"MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}")
print(f"MSE error: {metrics.mean_squared_error(y_test, predicted_values)}")
print(f"RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}")
print(f"R2 Score: {metrics.r2_score(y_test, predicted_values)}\n")
return x_train, x_test, y_train, y_test
# ==> Based on the cross-validation score, we would choose "Gradient Boosting Regressor" as our predict estimator.
# Ensemble Decision Tree - Gradient Boosting Regressor
# Tuning the hyper-parameter
def gradient_booster(param_grid, n_jobs, x_train, y_train):
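    # Run a 5-fold grid search over `param_grid` and print the best estimator, so its
    # hyper-parameters can be copied into the hard-coded regressor defined just below.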
estimator = GradientBoostingRegressor()
classifier = GridSearchCV(estimator=estimator, cv=5, param_grid=param_grid,
n_jobs=n_jobs)
classifier.fit(x_train, y_train)
print(classifier.best_estimator_)
#
clf = GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
learning_rate=0.05, loss='ls', max_depth=4,
max_features=1.0, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=3, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100,
n_iter_no_change=None, presort='auto',
random_state=None, subsample=1.0, tol=0.0001,
validation_fraction=0.1, verbose=0, warm_start=False)
def predict(x_test):
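    # Note: relies on the module-level clf, x_train and y_train (and on y_test for the
    # metric printouts below), which are (re)assigned further down the script.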
clf.fit(x_train, y_train)
## Predicting the results for our test data set
predicted_values = clf.predict(x_test)
#
print(f"Printing MAE error(avg abs residual):{metrics.mean_absolute_error(y_test,predicted_values)}")
print(f"Printing MSE error: {metrics.mean_squared_error(y_test, predicted_values)}")
print(f"Printing RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}")
print(f"R2 Score: {metrics.r2_score(y_test, predicted_values)}")
return predicted_values
def plot_func(name, y_test, predicted_values):
## Plotting different between real and predicted values
sns.scatterplot(y_test, predicted_values)
plt.plot([0, 20], [0, 20], '--')
plt.xlabel('Real Value')
plt.ylabel('Predicted Value')
plt.savefig(name) ###
# plt.savefig(f'plots/ML/all_features_predict.png') ###
# plt.savefig(f'plots/ML/withoutG2_predict.png')
# plt.savefig(f'plots/ML/WithoutG1G2_predict.png')
plt.close()
# Plot training deviance
test_score = np.zeros((100,), dtype=np.float64)
def plot_deviance(plot_file_name,y_test,predicted_values,x_test,test_score):
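    # Plot training vs. test deviance across boosting iterations using staged_predict;
    # the zero-filled score arrays assume the fitted clf uses n_estimators=100.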
test_score = np.zeros((100,), dtype=np.float64)
for i, predicted_values in enumerate(clf.staged_predict(x_test)):
test_score[i] = clf.loss_(y_test, predicted_values)
plt.title('Deviance')
plt.plot(np.arange(100) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(100) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.savefig(plot_file_name)
# plt.savefig(f'plots/ML/WithoutG2_deviance.png')
#plt.savefig(f'plots/ML/WithoutG1G2_deviance.png')
plt.close()
# Plot feature importance
def bar_func(plot_name,x):
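    # Horizontal bar plot of the 10 most important features of the fitted clf;
    # `x` is expected to be the column index of the corresponding feature matrix.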
(pd.Series(clf.feature_importances_, index=x)
.nlargest(10)
.plot(kind='barh'))
plt.title('Variable Importance')
plt.savefig(plot_name)
plt.close()
#################################
# ==> Based on the cross-validation score, we would choose "Gradient Boosting Regressor" as our predict estimator.
# Ensemble Decision Tree - Gradient Boosting Regressor
# Tuning the hyper-parameter
p = {'n_estimators': [100, 500],
'learning_rate': [0.1, 0.05, 0.02],
'max_depth': [4],
'min_samples_leaf': [3],
'max_features': [1.0]}
job = 4
# Data preparation (Keep all the features, including G1 and G2)
# Separating features(X) and target(y)
x1 = students.drop('G3', axis=1)
y1 = students['G3']
x_train, x_test, y_train, y_test = xyz(x1,y1)
gradient_booster(p, job, x_train, y_train)
predicted_values= predict(x_test)
plot_func(f'plots/ML/all_features_predicted_real.png', y_test, predicted_values)
plot_deviance(f'plots/ML/all_features_deviance.png',y_test,predicted_values,x_test,test_score)
bar_func(f'plots/ML/all_features_barplot.png',x1.columns)
# ==> From the variable importance plot, we can see that G2 affects the prediction greatly.
# Therefore, we are going to remove 'G2' from the X and see how the model performance is.
# Data preparation (Keep all the features but remove 'G2')
# Separating features(X) and target(y)
x2 = students.drop(['G3', 'G2'], axis=1)
y2 = students['G3']
x_train, x_test, y_train, y_test = xyz(x2,y2)
gradient_booster(p, job, x_train, y_train)
predicted_values= predict(x_test)
plot_func(f'plots/ML/Without_G2_predicted_real.png', y_test, predicted_values)
plot_deviance(f'plots/ML/Without_G2_deviance.png',y_test,predicted_values,x_test,test_score)
bar_func(f'plots/ML/Without_G2_barplot.png',x2.columns)
# ==> The model's performance declines but still shows that G1 has a strong effect on the final grade.
# Therefore, we are going to remove `G1 & G2` from the X and see how the model performance is.
# Data preparation - ( keep all the features without G1 ang G2)
# Separating features(X) and target(y)
x3 = students.drop(['G1', 'G2', 'G3'], axis=1)
y3 = students['G3']
x_train, x_test, y_train, y_test = xyz(x3,y3)
gradient_booster(p, job, x_train, y_train)
predicted_values= predict(x_test)
plot_func(f'plots/ML/Without_G1G2_predicted_real.png',y_test, predicted_values)
plot_deviance(f'plots/ML/Without_G1G2_deviance.png',y_test,predicted_values,x_test,test_score)
bar_func(f'plots/ML/Without_G1G2_barplot.png',x3.columns)
# ==> After removing the grade-related features 'G1' and 'G2', the model's predictive performance drops dramatically.
# We can see that none of the other features really impact students' final grade much. In order to see this more
# clearly, we are going to remove all the features and leave only 'G1' and 'G2' as X.
# In other words, we are going to check the impact of 'G1' and 'G2' on the model performance.
# data preparation ( keep only features: G1 and G2)
# Separating features(X) and target(y)
x4 = students[['G1', 'G2']]
y4 = students['G3']
x_train, x_test, y_train, y_test = xyz(x4,y4)
gradient_booster(p, job, x_train, y_train)
predicted_values= predict(x_test)
plot_func(f'plots/ML/OnlyG1G2_predicted_real.png', y_test, predicted_values)
plot_deviance(f'plots/ML/OnlyG1G2_deviance.png',y_test,predicted_values,x_test,test_score)
bar_func(f'plots/ML/OnlyG1G2_barplot.png',x4.columns)
"""
# Summary:
As far as the prediction model is concerned, retaining all of the feature variables yields almost the same
performance as a model that only retains the students' previous test scores (G1 and G2). If one of the previous
scores is removed, the prediction accuracy drops, but if both test scores (G1 and G2) are removed, the predictive
power of the model is greatly reduced and it no longer has any predictive value. From the analysis of the different
steps, we conclude:
1. If we collect only students' previous grades (G1 and G2), we can build a good prediction model for students'
final grade.
2. The above conclusion doesn't mean this dataset has no research value. The data visualization produced a lot of
interesting findings. Although these findings are not always about academic scores, they still show social-research
value: the differences in learning between boys and girls can give parents and schools better educational ideas.
Maybe boys need more exercise rather than simply more study time to improve performance, and schools or families can
provide different help to boys and girls in different subjects. Children who do not like to socialize can develop
their social skills, which helps improve academic performance, but parents and schools need to watch for problems
such as alcohol abuse caused by unhealthy socializing.
3. This dataset does not reveal a feature variable that really drives students' academic performance. Predicting a
student's next academic score from previous scores is of course the most effective and lowest-cost method. However,
as a deeper study of the factors affecting a student's performance, this data did not surface the most important
feature variable.
Suggestion: based on the above analysis, we suggest that data collection also consider the student's 'IQ', 'EQ' or
'expression ability', etc. These may be important factors affecting a student's academic performance. In the future,
we hope to be able to make accurate predictions of academic performance when no grade-related information is
available.
"""
# Without these 3 features (grades G1 & G2 and failures), the estimator scores decline dramatically.
# Compared with the others, the ensemble decision tree 'Gradient Boosting Regressor' is still the best estimator to choose.
# Machine Learning (Fun Part)
# 2. Predict whether my child has a girlfriend/boyfriend (classification). During adolescence, parents sometimes
# worry very much about their children dating early. The point of building this predictive model is to help
# parents judge whether their child is in a romantic relationship at school. 0 - No, 1 - Yes
# Data preparation (training data & test data)
# Separating features(X) and target(y)
X = students.drop(['romantic'], axis=1)
y = students['romantic']
# Splitting features and target datasets into: train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# Printing original Dataset
print(f"X.shape: {X.shape}, y.shape: {y.shape}")
# Printing splitted datasets
print(f"X_train.shape: {X_train.shape}, y_train.shape: {y_train.shape}")
print(f"X_test.shape: {X_test.shape}, y_test.shape: {y_test.shape}")
# Training a model using multiple differents algorithms and comparing the results
# Cross-validation to get each estimator score
for Model in [LogisticRegression, LinearSVC, neighbors.KNeighborsClassifier, SVC, GradientBoostingClassifier]:
model = Model()
model.fit(X_train, y_train)
predicted_values = model.predict(X_test)
cv = ShuffleSplit(n_splits=5)
print(f"{Model}: {Model.__name__, cross_val_score(Model(), X, y, cv=cv)}")
print('Classification Report')
print(classification_report(y_test, predicted_values))
# From the cross-validation scores above, we can see that `GradientBoostingClassifier` is the best choice.
# Tuning the hyper-parameter
p5 = {'n_estimators': [100, 500],
'learning_rate': [0.1, 0.05, 0.02],
'max_depth': [4],
'min_samples_leaf': [3],
'max_features': [1.0]}
job5 = 4
gradient_booster(p5, job5,X_train,y_train)
clf = GradientBoostingClassifier(criterion='friedman_mse', init=None,
learning_rate=0.1, loss='deviance', max_depth=4,
max_features=1.0, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=3, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=500,
n_iter_no_change=None, presort='auto',
random_state=None, subsample=1.0, tol=0.0001,
validation_fraction=0.1, verbose=0,
warm_start=False)
clf.fit(X_train, y_train)
# Predicting the results for our test data set
predicted_values = clf.predict(X_test)
print('Classification Report')
print(classification_report(y_test, predicted_values))
print('Confusion Matrix')
print(confusion_matrix(y_test, predicted_values))
#print('Overall f1-score')
#print(f1_score(y_test, predicted_values, average="macro"))
# plot confusion matrix
array = [[172, 29], [41, 56]]
df_cm = | pd.DataFrame(array, index=[i for i in "AB"], columns=[i for i in "AB"]) | pandas.DataFrame |
import scipy
import streamlit as st
import joblib
import pandas as pd
import numpy as np
@st.cache(allow_output_mutation=True)
def load_data():
'''
load_data() returns a dictionary where all saved models are loaded.
return: dict
'''
parameters = {'model':joblib.load('models/random_forest_regressor.sav'),
'sex':joblib.load('models/map_sex.sav'),
'smoker':joblib.load('models/map_smoker.sav'),
'region':joblib.load('models/map_region.sav'),
'bmi':joblib.load('models/scale_bmi.sav'),
'age_children':joblib.load('models/scale_age_children.sav'),
'expenses':joblib.load('models/scale_target.sav')}
return parameters
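# Typical usage of the loaded artifacts (a sketch; the keys follow the dict above):
# params = load_data()
# model, target_scaler = params['model'], params['expenses']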
def user_input_features():
'''
Take user input from sidebar.
return: pd.DataFrame Object
'''
inputFile = st.sidebar.file_uploader("Upload DataSet", type=['csv', 'xlsx', 'xls'])
cols = ['age', 'sex', 'bmi', 'children', 'smoker', 'region']
try:
if inputFile is not None:
            # validating whether it is a csv or an excel file
fileName = inputFile.name
if fileName[-3:] == 'csv':
df = | pd.read_csv(inputFile) | pandas.read_csv |
# to run the app, type the command `streamlit run FileName` in the terminal
import pandas as pd
import streamlit as st
import numpy as np
import folium
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
import plotly.express as px
from datetime import datetime
st.set_page_config(layout='wide')  # so the page content fills the whole screen
@st.cache(allow_output_mutation=True)  # to optimize the code's performance
def get_data(path):
df = pd.read_csv(path)
return df
def set_feature(data):
    # convert the lot size from square feet to square meters (1 sq ft = 0.3048**2 sq m)
    data['sqft_lot_m'] = data['sqft_lot'] * (0.3048 ** 2)
#add new feature
data['price_m2'] = data['price'] / data['sqft_lot_m']
return data
def overview_data(data):
# data overview
    f_attribute = st.sidebar.multiselect('Enter columns',
                                         data.columns)  # Q2 - filter that lets the user choose one or more variables to display (Q2)
    f_zipcode = st.sidebar.multiselect('Enter ZipCode', data['zipcode'].unique())  # Q1 - filter to display the properties of one or more regions (Q1)
    st.title('Data Overview')  # page title
# if (f_zipcode != []) & (f_attribute != []):
# data = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]
# elif (f_zipcode != []) & (f_attribute == []):
# data = data.loc[data['zipcode'].isin(f_zipcode), :]
# elif (f_zipcode == []) & (f_attribute != []):
    # data = data.loc[:, f_attribute]
# else:
# data = data.copy()
if (f_attribute == []):
if f_zipcode != []:
data = data.loc[data['zipcode'].isin(f_zipcode), :]
data2 = data.loc[data['zipcode'].isin(f_zipcode), :]
else: #f_zipcode == []
data = data.copy()
data2 = data.copy()
else: #f_attribute != []
if f_zipcode != []:
data2 = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]
data = data.loc[data['zipcode'].isin(f_zipcode), :]
else: #f_zipcode == []
data2 = data.loc[:, f_attribute]
data = data.copy()
st.dataframe(data2)
    c1, c2 = st.columns((1, 1))  # to place one table beside the other
# average metrics
    # Q3 - Look at the total number of properties, the average price, the average living-room size,
    # and also the average price per square meter for each zipcode.
    # data2 = get_data(path)
    df1 = data[['id', 'zipcode']].groupby('zipcode').count().reset_index()  # total number of properties
    df2 = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price
    df3 = data[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()  # average living-room size
    df4 = data[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price per square meter
# merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
df = pd.merge(m2, df4, on='zipcode', how='inner')
    df.columns = ['ZIPCODE', 'TOTAL HOUSES', 'PRICE', 'SQFT LIVING', 'PRICE/M2']
c1.header('Average Values')
c1.dataframe(df, height=600)
    # Descriptive statistics
    # Q4 - Analyze each of the columns in a more descriptive way.
num_attributes = data.select_dtypes(include=['int64', 'float64'])
media = pd.DataFrame(num_attributes.apply(np.mean))
mediana = pd.DataFrame(num_attributes.apply(np.median))
std = pd.DataFrame(num_attributes.apply(np.std))
max_ = pd.DataFrame(num_attributes.apply(np.max))
min_ = pd.DataFrame(num_attributes.apply(np.min))
df1 = pd.concat([max_, min_, media, mediana, std], axis=1).reset_index()
df1.columns = ['attributes', 'max', 'min', 'mean', 'median', 'std']
c2.header('Descriptive analysis')
c2.dataframe(df1, height=700)
return None
def portfolio_density(data):
    # portfolio density
st.title('Region Overview')
c1, c2 = st.columns((1, 1))
c1.header('Portfolio Density')
df = data.sample(10)
# base Map - folium
density_map = folium.Map(location=[data['lat'].mean(), data['long'].mean()],
default_zoom_start=15)
    marker_cluster = MarkerCluster().add_to(density_map)  # markers on the map
for name, row in df.iterrows():
folium.Marker([row['lat'], row['long']],
popup='Sold RS{0} on: {1}. Features: {2} sqft, {3} bedrooms, {4} bathrooms, year built: {5}'.format(
row['price'],
row['date'],
row['sqft_living'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(marker_cluster)
with c1:
folium_static(density_map)
return None
def commercial_distribution(data):
    # Distribution of the properties by commercial attributes
st.sidebar.title('Commercial Options')
st.title('Commercial Attributes')
    # Check the yearly price variation.
# Average Price per Year
data['date'] = pd.to_datetime(data['date']).dt.strftime('%Y-%m-%d')
# filter
min_year_built = int(data['yr_built'].min())
max_year_built = int(data['yr_built'].max())
st.sidebar.subheader('Select Max Year Built')
f_year_built = st.sidebar.slider('Year Built', min_year_built,
max_year_built,
min_year_built)
st.header('Average Price per Year Built')
# data select
df = data.loc[data['yr_built'] < f_year_built]
df = df[['yr_built', 'price']].groupby('yr_built').mean().reset_index()
# plot
fig = px.line(df, x='yr_built', y='price')
st.plotly_chart(fig, use_container_width=True)
    # Check the daily price variation.
# Average Price per Day
st.header('Average Price per Day')
st.sidebar.subheader('Select Max Date')
# filter
min_date = datetime.strptime(data['date'].min(), '%Y-%m-%d')
max_date = datetime.strptime(data['date'].max(), '%Y-%m-%d')
f_date = st.sidebar.slider('Date', min_date, max_date, min_date)
# data select
data['date'] = | pd.to_datetime(data['date']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = | DataFrame(comb, columns=names) | pandas.DataFrame |
# Null is uncorrelated random graph, alternative is stochastic blockmodel
import netcomp as nc
from joblib import Parallel, delayed
import multiprocessing
import os
import networkx as nx
import pandas as pd
import time
import pickle
import numpy as np
from tqdm import tqdm
####################################
### SET PARAMETERS
####################################
data_dir = "../pickled_data"
num_cores = multiprocessing.cpu_count()
# size of ensemble
ensemble_len = 500
n = 1000
p = 0.02
# SBM parameters, must average to p so that we have same volume as ER graph
# (note there are 2 partitions of equal size).
pp = 1.9 * p
qq = 0.1 * p
####################################
## DEFINE IMPORTANT FUNCTIONS
####################################
def distance(dist_func, A, B):
return dist_func(A, B)
lambda_adj = lambda A1, A2: nc.lambda_dist(A1, A2, kind="adjacency")
lambda_lap = lambda A1, A2: nc.lambda_dist(A1, A2, kind="laplacian")
lambda_nlap = lambda A1, A2: nc.lambda_dist(A1, A2, kind="laplacian_norm")
res_dist = lambda A1, A2: nc.resistance_distance(A1, A2, check_connected=False)
distances = [
nc.edit_distance,
res_dist,
nc.deltacon0,
nc.netsimile,
lambda_adj,
lambda_lap,
lambda_nlap,
]
labels = [
"Edit",
"Resistance Dist.",
"DeltaCon",
"NetSimile",
"Lambda (Adjacency)",
"Lambda (Laplacian)",
"Lambda (Normalized Laplacian)",
]
def grab_data(i, null=True):
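    # Draw one graph pair (ER vs ER under the null, ER vs a 2-block planted partition under
    # the alternative) and return all seven distances between their adjacency matrices as a
    # single-column DataFrame.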
G1 = nx.erdos_renyi_graph(n, p)
if null:
G2 = nx.erdos_renyi_graph(n, p)
else:
G2 = nx.planted_partition_graph(2, n // 2, pp, qq)
A1, A2 = [nx.adjacency_matrix(G).todense() for G in [G1, G2]]
adj_distances = pd.Series(
[distance(dfun, A1, A2) for dfun in distances],
index=labels,
name="Adjacency Distances",
)
data = pd.concat([adj_distances], axis=1)
return data
####################################
## TAKE DATA
####################################
print("Running on {} cores.".format(num_cores))
start = time.time()
results_null = Parallel(n_jobs=num_cores)(
delayed(grab_data)(i) for i in tqdm(range(ensemble_len))
)
end = time.time()
print("Null data complete. Total time elapsed: {} seconds.".format(end - start))
results_df_null = pd.concat(results_null, axis=1)
start = time.time()
results_not_null = Parallel(n_jobs=num_cores)(
delayed(grab_data)(i, null=False) for i in tqdm(range(ensemble_len))
)
end = time.time()
print("Not null data complete. Total time elapsed: {} seconds.".format(end - start))
results_df_not_null = | pd.concat(results_not_null, axis=1) | pandas.concat |
import pandas as pd
from scipy import sparse
from itertools import repeat
import pytest
import anndata as ad
from anndata.utils import make_index_unique
from anndata.tests.helpers import gen_typed_df
def test_make_index_unique():
index = pd.Index(["val", "val", "val-1", "val-1"])
with pytest.warns(UserWarning):
result = make_index_unique(index)
expected = pd.Index(["val", "val-2", "val-1", "val-1-1"])
assert list(expected) == list(result)
assert result.is_unique
def test_adata_unique_indices():
m, n = (10, 20)
obs_index = pd.Index(repeat("a", m), name="obs")
var_index = pd.Index(repeat("b", n), name="var")
adata = ad.AnnData(
X=sparse.random(m, n, format="csr"),
obs=gen_typed_df(m, index=obs_index),
var=gen_typed_df(n, index=var_index),
obsm={"df": gen_typed_df(m, index=obs_index)},
varm={"df": gen_typed_df(n, index=var_index)},
)
| pd.testing.assert_index_equal(adata.obsm["df"].index, adata.obs_names) | pandas.testing.assert_index_equal |
import pandas as pd
import numpy as np
import os
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn import metrics
from scipy.stats import rankdata
import math
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--enspath", type=str, default="./data", help="Path to folder with all csvs")
parser.add_argument("--enstype", type=str, default="loop",
help="Type of ensembling to be performed - Current options: loop / sa")
parser.add_argument("--exp", type=str, default="experiment", help="Name of experiment for csv's")
parser.add_argument('--subdata', action='store_const', default=False, const=True)
# Parse the arguments.
args = parser.parse_args()
return args
### FUNCTIONS IMPLEMENTING ENSEMBLE METHODS ###
### HELPERS ###
# Optimizing accuracy based on ROC AUC
# Source: https://albertusk95.github.io/posts/2019/12/best-threshold-maximize-accuracy-from-roc-pr-curve/
# ACC = (TP + TN)/(TP + TN + FP + FN) = (TP + TN) / (P + N) (= Correct ones / all)
# Sensitivity / tpr = TP / P
# Specificity / tnr = TN / N
def get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, num_pos_class, num_neg_class):
tp = tpr * num_pos_class
tn = (1 - fpr) * num_neg_class
acc = (tp + tn) / (num_pos_class + num_neg_class)
best_threshold = thresholds[np.argmax(acc)]
return np.amax(acc), best_threshold
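# Hedged usage sketch (added for illustration, not called anywhere in this script):
# the labels and scores below are invented; sklearn's roc_curve supplies the
# (fpr, tpr, thresholds) triple that the helper above expects.
def _demo_best_threshold():
    y_true = np.array([0, 0, 1, 1, 1, 0])
    y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2])
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
    num_pos, num_neg = int((y_true == 1).sum()), int((y_true == 0).sum())
    # Returns the best reachable accuracy and the threshold that achieves it.
    return get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, num_pos, num_neg)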
def set_acc(row, threshold):
if row['proba'] >= threshold:
val = 1
else:
val = 0
return val
### AVERAGES ###
def simple_average(targets, example, weights=None, power=1, normalize=False):
"""
targets: df with target values as columns
example: output df example (e.g. including ID - make sure to adjust iloc below if target is not at 1)
weights: per submission weights; default is equal weighting
power: optional for power averaging
normalize: Whether to normalize targets btw 0 & 1
"""
if weights is None:
weights = len(targets.columns) * [1.0 / len(targets.columns)]
else:
weights = weights / np.sum(weights)
preds = example.copy()
preds.iloc[:, 1] = np.zeros(len(preds))
if normalize:
targets = (targets - targets.min()) / (targets.max() - targets.min())
for i in range(len(targets.columns)):
preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * (targets.iloc[:, i].astype(float) ** power))
return preds
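# Hedged usage sketch for simple_average (illustrative; the frames below are invented):
# `targets` holds one probability column per model, `example` mimics a submission file
# whose second column receives the blended scores.
def _demo_simple_average():
    targets = pd.DataFrame({"model_a": [0.2, 0.9, 0.4], "model_b": [0.3, 0.8, 0.6]})
    example = pd.DataFrame({"id": [10, 11, 12], "proba": [0.0, 0.0, 0.0]})
    return simple_average(targets, example, weights=[2, 1])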
def rank_average(subs, weights=None):
"""
subs: list of submission dataframes with two columns (id, value)
weights: per submission weights; default is equal weighting
"""
if weights is None:
weights = len(subs) * [1.0 / len(subs)]
else:
weights = weights / np.sum(weights)
preds = subs[0].copy()
preds.iloc[:, 1] = np.zeros(len(subs[0]))
for i, sub in enumerate(subs):
preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * rankdata(sub.iloc[:, 1]) / len(sub))
return preds
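# Hedged usage sketch for rank_average (illustrative; the submissions below are invented):
# each (id, proba) frame is converted to ranks before the weighted blend.
def _demo_rank_average():
    sub_a = pd.DataFrame({"id": [10, 11, 12], "proba": [0.2, 0.9, 0.5]})
    sub_b = pd.DataFrame({"id": [10, 11, 12], "proba": [0.1, 0.7, 0.8]})
    return rank_average([sub_a, sub_b])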
### SIMPLEX ###
### Similar to scipy optimize
# Taken & adapted from:
# https://github.com/chrisstroemel/Simple
from heapq import heappush, heappop, heappushpop
import numpy
import math
import time
import matplotlib.pyplot as plotter
CAPACITY_INCREMENT = 1000
class _Simplex:
def __init__(self, pointIndices, testCoords, contentFractions, objectiveScore, opportunityCost, contentFraction,
difference):
self.pointIndices = pointIndices
self.testCoords = testCoords
self.contentFractions = contentFractions
self.contentFraction = contentFraction
self.__objectiveScore = objectiveScore
self.__opportunityCost = opportunityCost
self.update(difference)
def update(self, difference):
self.acquisitionValue = -(self.__objectiveScore + (self.__opportunityCost * difference))
self.difference = difference
def __eq__(self, other):
return self.acquisitionValue == other.acquisitionValue
def __lt__(self, other):
return self.acquisitionValue < other.acquisitionValue
class SimpleTuner:
def __init__(self, cornerPoints, objectiveFunction, exploration_preference=0.15):
self.__cornerPoints = cornerPoints
self.__numberOfVertices = len(cornerPoints)
self.queue = []
self.capacity = self.__numberOfVertices + CAPACITY_INCREMENT
self.testPoints = numpy.empty((self.capacity, self.__numberOfVertices))
self.objective = objectiveFunction
self.iterations = 0
self.maxValue = None
self.minValue = None
self.bestCoords = []
self.opportunityCostFactor = exploration_preference # / self.__numberOfVertices
def optimize(self, maxSteps=10):
for step in range(maxSteps):
# print(self.maxValue, self.iterations, self.bestCoords)
if len(self.queue) > 0:
targetSimplex = self.__getNextSimplex()
newPointIndex = self.__testCoords(targetSimplex.testCoords)
for i in range(0, self.__numberOfVertices):
tempIndex = targetSimplex.pointIndices[i]
targetSimplex.pointIndices[i] = newPointIndex
newContentFraction = targetSimplex.contentFraction * targetSimplex.contentFractions[i]
newSimplex = self.__makeSimplex(targetSimplex.pointIndices, newContentFraction)
heappush(self.queue, newSimplex)
targetSimplex.pointIndices[i] = tempIndex
else:
testPoint = self.__cornerPoints[self.iterations]
testPoint.append(0)
testPoint = numpy.array(testPoint, dtype=numpy.float64)
self.__testCoords(testPoint)
if self.iterations == (self.__numberOfVertices - 1):
initialSimplex = self.__makeSimplex(numpy.arange(self.__numberOfVertices, dtype=numpy.intp), 1)
heappush(self.queue, initialSimplex)
self.iterations += 1
def get_best(self):
return (self.maxValue, self.bestCoords[0:-1])
def __getNextSimplex(self):
targetSimplex = heappop(self.queue)
currentDifference = self.maxValue - self.minValue
while currentDifference > targetSimplex.difference:
targetSimplex.update(currentDifference)
# if greater than because heapq is in ascending order
if targetSimplex.acquisitionValue > self.queue[0].acquisitionValue:
targetSimplex = heappushpop(self.queue, targetSimplex)
return targetSimplex
def __testCoords(self, testCoords):
objectiveValue = self.objective(testCoords[0:-1])
if self.maxValue is None or objectiveValue > self.maxValue:
self.maxValue = objectiveValue
self.bestCoords = testCoords
if self.minValue is None: self.minValue = objectiveValue
elif objectiveValue < self.minValue:
self.minValue = objectiveValue
testCoords[-1] = objectiveValue
if self.capacity == self.iterations:
self.capacity += CAPACITY_INCREMENT
self.testPoints.resize((self.capacity, self.__numberOfVertices))
newPointIndex = self.iterations
self.testPoints[newPointIndex] = testCoords
return newPointIndex
def __makeSimplex(self, pointIndices, contentFraction):
vertexMatrix = self.testPoints[pointIndices]
coordMatrix = vertexMatrix[:, 0:-1]
barycenterLocation = numpy.sum(vertexMatrix, axis=0) / self.__numberOfVertices
differences = coordMatrix - barycenterLocation[0:-1]
distances = numpy.sqrt(numpy.sum(differences * differences, axis=1))
totalDistance = numpy.sum(distances)
barycentricTestCoords = distances / totalDistance
euclideanTestCoords = vertexMatrix.T.dot(barycentricTestCoords)
vertexValues = vertexMatrix[:, -1]
testpointDifferences = coordMatrix - euclideanTestCoords[0:-1]
testPointDistances = numpy.sqrt(numpy.sum(testpointDifferences * testpointDifferences, axis=1))
inverseDistances = 1 / testPointDistances
inverseSum = numpy.sum(inverseDistances)
interpolatedValue = inverseDistances.dot(vertexValues) / inverseSum
currentDifference = self.maxValue - self.minValue
opportunityCost = self.opportunityCostFactor * math.log(contentFraction, self.__numberOfVertices)
return _Simplex(pointIndices.copy(), euclideanTestCoords, barycentricTestCoords, interpolatedValue,
opportunityCost, contentFraction, currentDifference)
def plot(self):
if self.__numberOfVertices != 3: raise RuntimeError('Plotting only supported in 2D')
matrix = self.testPoints[0:self.iterations, :]
x = matrix[:, 0].flat
y = matrix[:, 1].flat
z = matrix[:, 2].flat
coords = []
acquisitions = []
for triangle in self.queue:
coords.append(triangle.pointIndices)
acquisitions.append(-1 * triangle.acquisitionValue)
plotter.figure()
plotter.tricontourf(x, y, coords, z)
plotter.triplot(x, y, coords, color='white', lw=0.5)
plotter.colorbar()
plotter.figure()
plotter.tripcolor(x, y, coords, acquisitions)
plotter.triplot(x, y, coords, color='white', lw=0.5)
plotter.colorbar()
plotter.show()
def Simplex(devs, label, df_list=False, exploration=0.01, scale=1):
"""
devs: list of dataframes with "proba" column
label: list/np array of ground truths
scale: By default we will get weights in the 0-1 range. Setting e.g. scale=50, gives weights in the 0-50 range.
"""
predictions = []
if df_list:
for df in devs:
predictions.append(df.proba)
print(len(predictions[0]))
else:
for i, column in enumerate(devs):
predictions.append(devs.iloc[:, i])
print(len(predictions[0]))
print("Optimizing {} inputs.".format(len(predictions)))
def roc_auc(weights):
''' Will pass the weights as a numpy array '''
final_prediction = 0
for weight, prediction in zip(weights, predictions):
final_prediction += weight * prediction
return roc_auc_score(label, final_prediction)
# This defines the search area, and other optimization parameters.
# For e.g. 11 models, we have 12 corner points -- e.g. all none, only model 1, all others none, only model 2 all others none..
# We concat an identity matrix & a zero array to create those
zero_vtx = np.zeros((1, len(predictions)), dtype=int)
optimization_domain_vertices = np.identity(len(predictions), dtype=int) * scale
optimization_domain_vertices = np.concatenate((zero_vtx, optimization_domain_vertices), axis=0).tolist()
number_of_iterations = 3000
exploration = exploration # optional, default 0.01
# Optimize weights
tuner = SimpleTuner(optimization_domain_vertices, roc_auc, exploration_preference=exploration)
tuner.optimize(number_of_iterations)
best_objective_value, best_weights = tuner.get_best()
print('Optimized =', best_objective_value) # same as roc_auc(best_weights)
print('Weights =', best_weights)
return best_weights
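# Hedged usage sketch for the simplex weight search (illustrative only; the dev
# probabilities and labels below are invented, and the 3000-iteration budget
# hard-coded above still applies).
def _demo_simplex_weights():
    dev_a = pd.DataFrame({"proba": [0.2, 0.8, 0.6, 0.3]})
    dev_b = pd.DataFrame({"proba": [0.4, 0.7, 0.5, 0.1]})
    labels = np.array([0, 1, 1, 0])
    # Returns one weight per input model, chosen to maximize ROC AUC on `labels`.
    return Simplex([dev_a, dev_b], labels, df_list=True, exploration=0.01)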
### APPLYING THE HELPER FUNCTIONS ###
def sa_wrapper(data_path="./data"):
"""
Applies simple average.
data_path: path to folder with X * (dev_seen, test_seen & test_unseen) .csv files
"""
# Make sure the lists will be ordered, i.e. test[0] is the same model as devs[0]
train, dev, test, test_unseen = [], [], [], []
train_probas, dev_probas, test_probas, test_unseen_probas = {}, {}, {}, {} # Never dynamically add to a pd Dataframe
for csv in sorted(os.listdir(data_path)):
if ".csv" in csv:
print("Included in Simple Average: ", csv)
if "train" in csv:
train.append(pd.read_csv(data_path + csv))
train_probas[csv[:-4]] = pd.read_csv(data_path + csv).proba.values
elif ("dev" in csv) or ("val" in csv):
dev.append(pd.read_csv(data_path + csv))
dev_probas[csv[:-8]] = pd.read_csv(data_path + csv).proba.values
elif "test_unseen" in csv:
test_unseen.append(pd.read_csv(data_path + csv))
test_unseen_probas[csv[:-14]] = | pd.read_csv(data_path + csv) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import base64
import datetime
import io
import copy
from ortools.sat.python import cp_model
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import plotly.express as px
# import SCHED_main_code as sched
from dash.dependencies import Input, Output, State
from scheduler import Scheduler, TimeVar
a = None
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
# df = pd.DataFrame({
# "Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
# "Amount": [4, 1, 2, 2, 4, 5],
# "City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
# })
# fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
scheduler = Scheduler()
app.layout = html.Div([
# html.H2(children='Hello You'),
html.H1(children='Scheduler'),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select')
, ' your XML Files']),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=True
),
html.Div(id='output-data-upload'),
dash_table.DataTable(id='table-package'),
dash_table.DataTable(id='table-worker'),
dash_table.DataTable(id='table-location'),
html.Button('Create Schedule', id='submit-val', n_clicks=0),
html.Div(id='scheduler-output'),
])
# +
# [dcc.Input(
# id=f"table-{i}",
# type=j,
# placeholder=f"input type {j}",) for i in ["package", "worker", "location"] for j in ["data", "column"]])
def parse_contents(contents, filename, date):
content_type, content_string = contents.split(',')
# """
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
# pd.read_csv has no sheet_name argument and a plain CSV holds a single table,
# so the same frame is reused for the three expected sheets here.
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df2 = df.copy()
df3 = df.copy()
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded), sheet_name='Sheet_package')
df2 = pd.read_excel(io.BytesIO(decoded), sheet_name='Sheet_worker')
df3 = pd.read_excel(io.BytesIO(decoded), sheet_name='Sheet_location')
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
# """
# return df, df2, df3
# def show_tables(df, df2, df3, filename, date):
# path = "data/xl.xlsx"
# df = pd.read_excel(open(path, 'rb'),
# sheet_name='Sheet_package')
# df2 = pd.read_excel(open(path, 'rb'),
# sheet_name='Sheet_worker')
# df3 = pd.read_excel(open(path, 'rb'),
# sheet_name='Sheet_location')
# data = sched.main_code(df,df2,df3,df4,df5,df6,df7,df8,df9)
# gantt_data = data[0]
# gantt_figure = data[1]
num_vehicles = 4
num_shifts = 11
time_shifts = [TimeVar(6, 30) + TimeVar(0, 20*i)
for i in range(num_shifts)]
scheduler(df, df2, df3, time_shifts, num_vehicles)
return html.Div([
html.H5(f'Input filename: {filename}'),
html.H6(datetime.datetime.fromtimestamp(date)),
html.Div([
html.Div(
className='row',
children=[
html.H5('Location info'),
dash_table.DataTable(
id='table-location',
data=df3.to_dict('rows'),
columns=[{'name': i, 'id': i} for i in df3.columns],
editable=True
),
], style={'width': '50%', 'display': 'inline-block'}),
html.Div(
className='row',
children=[
html.H5('Worker info'),
dash_table.DataTable(
id='table-worker',
data=df2.to_dict('rows'),
columns=[{'name': i, 'id': i} for i in df2.columns],
editable=True
),
], style={'width': '50%', 'display': 'inline-block'}),
], style={'width': '100%', 'display': 'inline-block'}),
html.H5('Package info'),
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
id='table-package',
data=scheduler.input_data_package_orig.to_dict('rows'),
columns=[{'name': i, 'id': i} for i in ['package', 'quantity', 'decay',
'location', 'vehicle', 'next', 'yesterday']],
editable=True
),
html.Hr(),
# dcc.Graph(gantt_figure),
# # For debugging, display the raw contents provided by the web browser
# html.Div('Raw Content'),
# html.Pre(contents[0:200] + '...', style={
# 'whiteSpace': 'pre-wrap',
# 'wordBreak': 'break-all'
# }),
])
##################################
@app.callback(Output('scheduler-output', 'children'),
[dash.dependencies.Input('submit-val', 'n_clicks')],
Input('table-package', 'data'),
Input('table-package', 'columns'),
Input('table-worker', 'data'),
Input('table-worker', 'columns'),
Input('table-location', 'data'),
Input('table-location', 'columns'),
# Input('output-data-upload', 'children'),
)
def show_output_table(n_clicks, pr, pc, wr, wc, lr, lc):
# def show_output_table(n_clicks, pr, pc):
# print(o)
if scheduler.input_data_package is None: return
df = pd.DataFrame(pr, columns=[c['name'] for c in pc])
df2 = pd.DataFrame(wr, columns=[c['name'] for c in wc])
df3 = | pd.DataFrame(lr, columns=[c['name'] for c in lc]) | pandas.DataFrame |
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from helper import *
def readfilerun(run):
"reads in files from each run"
#path = os.path.join(os.getcwd())
testdir = os.path.join(os.getcwd(), "test", "run" + str(run))
dir_files = os.listdir(testdir)
#print(dir_files) # listing all files in directory
df_lst = []
# loop over the list of csv files
for f in dir_files:
#print(f)
filedir = os.path.join(os.getcwd(), "test", "run" + str(run), f)
#dftest = open(filedir)
df = pd.read_csv(filedir)
# print the location and filename
# print('Location:', f)
# str.strip() removes characters, not a suffix, so drop the "-iperf.csv" ending explicitly
filename = f.split("\\")[-1].replace("-iperf.csv", "")
#print('File Name:', filename)
# GET LOSS regex to get everything after last "-"
los = str(re.search('([^\-]+$)', filename).group(0))
# GET LATENCY regex to everything between "_" and "-"
lat = (re.search("_(.*?)-", f).group(0)).strip("_").strip("-")
# print("run" + str(run) + ", loss: " + los, "latency: "+ lat)
# print()
# labelling the df for the loss
tempdf1 = df.assign(loss = int(los))
# labelling the df for latency
tempdf2 = tempdf1.assign(latency = int(lat))
# labelling the df for run
tempdf3 = tempdf2.assign(iteration = run)
# data processing
tdf = tempdf3
tdf['total_bytes'] = tdf['1->2Bytes'] + tdf['2->1Bytes'] # combining bytes
tdf['total_pkts'] = tdf['1->2Pkts'] + tdf['2->1Pkts'] # combining packets
tdf['packet_sizes'] = tdf['packet_sizes'].astype('str').apply(return_int) # converting list of packet sizes to type int
tdf['pkt sum'] = tdf['packet_sizes'].apply(lambda x: sum(x)) # summing packets
tdf['packet_dirs'] = tdf['packet_dirs'].astype('str').apply(return_int) # converting to type int
tdf['longest_seq'] = tdf['packet_dirs'].apply(longest_seq) # finding longest sequence
tdf['packet_times'] = tdf['packet_times'].apply(return_int) # converting to int
#tdf = onehot_(tdf)
def maxbyte(x):
x = | pd.DataFrame([x[0],x[1]]) | pandas.DataFrame |
import subprocess
import json
import os
import pandas as pd
with open("./config/feature_config.json") as f:
feature_config = json.load(f)
# get the location of R script
r_scipt_dir = feature_config["r_script_dir"]
# Kallisto out dir
kallisto_out_dir = feature_config["processed"]["kallisto"]
# dir for DESeq2 output dir
deseq_cts_matrix_dir = feature_config["features"]["deseq_cts_matrix_dir"]
# covariates dir from the SRAruntable
covariates = feature_config["features"]["covariates"]
covariates_dir = covariates["dir"]
covariates_in_cols = covariates["columns"]["in_cols"]
covariates_out_cols = covariates["columns"]["out_cols"]
disorders = covariates["disorders"]
brain_regions = covariates["brain_regions"]
abbr = covariates["disorders_abbr"]
num_cov = covariates["num"]
# sraruntable_dir table dir
sraruntable_dir = feature_config["sraruntable_dir"]
def test_r():
"""
test running the R script
"""
print(r_scipt_dir)
subprocess.call(["Rscript", "--vanilla", r_scipt_dir])
def make_cts():
"""
This method takes no arguments; it reads the 352 abundance.tsv files in the processed
Kallisto directory and builds a matrix that counts the different subfeatures.
"""
abundances_dirs = os.listdir(kallisto_out_dir)
abundances_dirs.sort()
# cols_name = pd.read_csv(os.path.join(kallisto_out_dir, abundances_dirs[0], "abundance.tsv"), sep="\t").target_id
# print(cols_name)
result = pd.DataFrame()
for pair in abundances_dirs:
abundances_dir = os.path.join(kallisto_out_dir, pair, "abundance.tsv")
df = | pd.read_csv(abundances_dir, sep="\t") | pandas.read_csv |
"""runeberg.
Usage:
dasem.runeberg download-catalogue
dasem.runeberg catalogue-as-csv
Description
-----------
Runeberg is a digital library with primarily Nordic texts. It is available from
http://runeberg.org/
"""
from __future__ import absolute_import, division, print_function
from os.path import join
from re import DOTALL, UNICODE, findall
import sys
from pandas import DataFrame
import requests
from .config import data_directory
from .utils import make_data_directory
CATALOGUE_URL = 'http://runeberg.org/katalog.html'
CATALOGUE_FILENAME = 'katalog.html'
def fix_author(author):
"""Change surname-firstname order.
Parameters
----------
author : str
Author as string
Returns
-------
fixed_author : str
Changed author string.
Examples
--------
>>> author = '<NAME>'
>>> fix_author(author)
'<NAME>'
"""
author_parts = author.split(', ')
if len(author_parts) == 2:
fixed_author = author_parts[1] + ' ' + author_parts[0]
else:
fixed_author = author
return fixed_author
class Runeberg(object):
"""Runeberg.
Examples
--------
>>> runeberg = Runeberg()
>>> catalogue = runeberg.catalogue()
>>> danish_catalogue = catalogue.ix[catalogue.language == 'dk', :]
>>> len(danish_catalogue) > 300
True
"""
def download_catalogue(self):
"""Download and store locally the Runeberg catalogue."""
make_data_directory(data_directory(), 'runeberg')
filename = join(data_directory(), 'runeberg', CATALOGUE_FILENAME)
response = requests.get(CATALOGUE_URL)
with open(filename, 'wb') as f:  # response.content is bytes, so write in binary mode
f.write(response.content)
def catalogue(self, fix_author=True):
"""Retrieve and parse Runeberg catalogue.
Returns
-------
books : pandas.DataFrame
Dataframe with book information.
fix_author : bool, optional
Determine if author names should be rearranged in firstname-surname
order [default: True]
"""
response = requests.get(CATALOGUE_URL)
flags = DOTALL | UNICODE
tables = findall(r'<table.*?</table>', response.text, flags=flags)
rows = findall(r'<tr.*?</tr>', tables[1], flags=flags)
books = []
for row in rows[1:]:
elements = findall('<td.*?</td>', row, flags=flags)
book_id, title = findall(r'/(.*?)/">(.*?)<',
elements[4], flags=flags)[0]
try:
author_id, author = findall(r'/authors/(.*?).html">(.*?)<',
elements[6], flags=flags)[0]
except:
author_id, author = '', ''
if fix_author:
# fix_author name collision. TODO
author = globals()['fix_author'](author)
book = {
'type': findall(r'alt="(.*?)">', elements[0], flags=flags)[0],
'book_id': book_id,
'title': title,
'author_id': author_id,
'author': author,
'year': elements[8][15:-5],
'language': elements[10][-9:-7]
}
books.append(book)
return | DataFrame(books) | pandas.DataFrame |
import plotly.figure_factory as ff
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import streamlit as st
import requests
from src.utils.helpers import hide_table_indexes, BST
def main(url):
type_s = st.radio(
"Select data type:", (
"1. Server Statistics",
"2. Miners Statistics",
"3. Historic Prices",
)
)
if type_s[:1] == "1":
response = requests.get(f"{url}/statistics").json()
with st.form("richest_miners"):
st.subheader("Top 10 Richest Miners")
m_list = | pd.json_normalize(response) | pandas.json_normalize |
# pylint: disable=unused-import
# type: ignore
"""Computations for plot_diff([df1, df2, ..., dfn],x)."""
from collections import UserList
from typing import Any, Callable, Dict, List, Union, Optional
import sys
import math
import pandas as pd
import numpy as np
import dask
import dask.array as da
import dask.dataframe as dd
from dask.array.stats import kurtosis, skew
from ...utils import gaussian_kde
from ...intermediate import Intermediate
from ...dtypes import (
Continuous,
detect_dtype,
is_dtype,
)
from ...configs import Config
from ...distribution.compute.univariate import _calc_box
from ...correlation.compute.univariate import (
_pearson_1xn,
_spearman_1xn,
_kendall_tau_1xn,
_corr_filter,
)
from ...correlation.compute.common import CorrelationMethod
from ...eda_frame import EDAFrame
class Srs(UserList):
"""
This class **separates** the columns with the same name into individual series.
"""
# pylint: disable=too-many-ancestors, eval-used, too-many-locals
def __init__(self, srs: Union[dd.DataFrame, List[Any]], agg: bool = False) -> None:
super().__init__()
if agg:
self.data = srs
else:
if len(srs.shape) > 1:
self.data: List[dd.Series] = [srs.iloc[:, loc] for loc in range(srs.shape[1])]
else:
self.data: List[dd.Series] = [srs]
def __getattr__(self, attr: str) -> UserList:
output = []
for srs in self.data:
output.append(getattr(srs, attr))
return Srs(output, agg=True)
def apply(self, method: str, *params: Optional[Any], **kwargs: Optional[Any]) -> UserList:
"""
Apply the same method for all elements in the list.
"""
output = []
for srs in self.data:
output.append(getattr(srs, method)(*params, **kwargs))
return Srs(output, agg=True)
def getidx(self, ind: Union[str, int]) -> List[Any]:
"""
Get the specified index for all elements in the list.
"""
output = []
for data in self.data:
output.append(data[ind])
return output
def getmask(
self, mask: Union[List[dd.Series], UserList], inverse: bool = False
) -> List[dd.Series]:
"""
Return rows based on a boolean mask.
"""
output = []
for data, cond in zip(self.data, mask):
if inverse:
output.append(data[~cond])
else:
output.append(data[cond])
return output
def self_map(
self,
func: Callable[[dd.Series], Any],
condition: Optional[List[bool]] = None,
multi_args: Optional[Any] = None,
**kwargs: Any,
) -> List[Any]:
"""
Map the data to the given function.
"""
if condition:
rslt = []
for cond, data in zip(condition, self.data):
if not cond:
rslt.append(func(data, **kwargs))
else:
rslt.append(None)
return rslt
elif multi_args:
rslt = []
for args, data in zip(multi_args, self.data):
rslt.append(func(data, args, **kwargs))
return rslt
else:
return [func(srs, **kwargs) for srs in self.data]
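# Hedged usage sketch for Srs (illustrative; the toy frames below are invented).
# Mirroring compare_multiple_col below, two frames sharing the column name "x" are
# aligned with dd.concat and the duplicated column is split into one series per frame.
def _demo_srs_split() -> List[float]:
    df_a = dd.from_pandas(pd.DataFrame({"x": [1.0, 2.0, 3.0]}), npartitions=1)
    df_b = dd.from_pandas(pd.DataFrame({"x": [4.0, 5.0, 6.0]}), npartitions=1)
    aligned = dd.concat([df_a, df_b], axis=1)  # both columns are named "x"
    srs = Srs(aligned["x"])  # 2-D selection -> one dd.Series per source frame
    return srs.self_map(lambda s: float(s.sum().compute()))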
def compare_multiple_col(
df_list: List[dd.DataFrame],
x: str,
cfg: Config,
) -> Intermediate:
"""
Compute function for plot_diff([df...],x)
Parameters
----------
df_list
Dataframe sequence to be compared.
x
Name of the column to be compared
cfg
Config instance
"""
aligned_dfs = dd.concat(df_list, axis=1)
baseline: int = cfg.diff.baseline
srs = Srs(aligned_dfs[x])
data: List[Any] = []
col_dtype = srs.self_map(detect_dtype)
if len(col_dtype) > 1:
col_dtype = col_dtype[baseline]
else:
col_dtype = col_dtype[0]
if is_dtype(col_dtype, Continuous()):
data.append((_cont_calcs(srs.apply("dropna"), cfg, df_list, x)))
stats = calc_stats_cont(srs, cfg)
stats, data = dask.compute(stats, data)
return Intermediate(col=x, data=data, stats=stats, visual_type="comparison_continuous")
else:
return Intermediate()
def _cont_calcs(srs: Srs, cfg: Config, df_list: List[dd.DataFrame], x: str) -> Dict[str, List[Any]]:
"""
Computations for a continuous column in plot_diff([df...],x)
"""
# pylint:disable = too-many-branches, too-many-locals
data: Dict[str, List[Any]] = {}
# drop infinite values
mask = srs.apply("isin", {np.inf, -np.inf})
srs = Srs(srs.getmask(mask, inverse=True), agg=True)
min_max = srs.apply(
"map_partitions", lambda x: pd.Series([x.max(), x.min()]), meta=pd.Series([], dtype=float)
).data
if cfg.kde.enable:
min_max_comp = []
for min_max_value in dask.compute(min_max)[0]:
min_max_comp.append(math.isclose(min_max_value.min(), min_max_value.max()))
min_max = dd.concat(min_max).repartition(npartitions=1)
# histogram
if cfg.hist.enable:
data["hist"] = srs.self_map(
da.histogram, bins=cfg.hist.bins, range=(min_max.min(), min_max.max())
)
# compute the density histogram
if cfg.kde.enable:
data["dens"] = srs.self_map(
da.histogram,
condition=min_max_comp,
bins=cfg.kde.bins,
range=(min_max.min(), min_max.max()),
density=True,
)
# gaussian kernel density estimate
data["kde"] = []
sample_data = dask.compute(
srs.apply(
"map_partitions",
lambda x: x.sample(min(1000, x.shape[0])),
meta= | pd.Series([], dtype=float) | pandas.Series |
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import pandas as pd
# https://juanitorduz.github.io/fb_prophet/
def seasonal_decomp(df, target_col, date_col, freq="D"):
# freq == "W-Mon" or "W-Sun" or "D"
pred_df = df[[date_col, target_col]].copy()
pred_df.rename(columns={date_col:'ds', target_col:'y'}, inplace=True)
pred_df = pred_df.set_index('ds').asfreq(freq)
decomp_obj = seasonal_decompose(
x=pred_df['y'],
model='additive'
)
fig, ax = plt.subplots(4, 1, figsize=(12, 12))
# Observed time series.
decomp_obj.observed.plot(ax=ax[0])
ax[0].set(title='observed')
# Trend component.
decomp_obj.trend.plot(label='fit', ax=ax[1])
ax[1].set(title='trend')
# Seasonal component.
decomp_obj.seasonal.plot(label='fit', ax=ax[2])
ax[2].set(title='seasonal')
# Residual.
decomp_obj.resid.plot(label='fit', ax=ax[3])
ax[3].set(title='resid')
fig.suptitle('Time Series Decomposition', y=1.01)
plt.tight_layout()
decomp_df = | pd.DataFrame([decomp_obj.observed, decomp_obj.trend, decomp_obj.seasonal, decomp_obj.resid]) | pandas.DataFrame |
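# Hedged usage sketch (illustrative; the toy frame below is invented and the call is
# not executed here): a short daily series with a weekly pattern is decomposed by
# passing its value/date column names to seasonal_decomp above.
def _demo_seasonal_decomp():
    idx = pd.date_range("2021-01-01", periods=60, freq="D")
    toy = pd.DataFrame({"date": idx, "value": [i % 7 + i / 10.0 for i in range(60)]})
    seasonal_decomp(toy, target_col="value", date_col="date", freq="D")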
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import glob
import os
import sh
from pandas import DataFrame
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, ScalarFormatter, LogLocator, AutoMinorLocator
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['axes.linewidth'] = 1.5
pp = PdfPages('Stats.pdf')
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def get_line_number2(value, matrix):
for i, line in enumerate(matrix, 1):
if line == value:
return i
def stats_blockt(ntt, new_tau, etau, fileout, filecl, pathdir, f_sig=5):
"""
Compute confidence intervals for the fitted coefficients via chi-square sampling.
Parameters
-------------
ntt : 'array'
    Spectral x-axis values of the observed data.
new_tau : 'array'
    Observed optical depth values.
etau : 'array'
    Uncertainty on the optical depth values.
fileout : 'str'
    Path to the file with the fitted component spectra used to rebuild the model.
filecl : 'str'
    Path to the CSV file with the best-fit coefficients.
pathdir : 'str'
    Directory where the statistics files and plots are written.
f_sig : 'float'
    Gaussian sampling width per coefficient, in units of the standard deviation of `etau` (default 5).
"""
xdata0 = ntt
ydata0 = new_tau
err_y = etau
e_min = ydata0 - err_y
e_max = ydata0 + err_y
with open(fileout, 'r') as f2:
lines = f2.readlines()
da = []
da = [line.split() for line in lines]
dainp = np.asfarray(da)
ucv = dainp.shape[1] # number of columns
w_values = dainp.shape[1] - (ucv / 2) # weight values
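# assumption: fileout stores alternating x/component columns, so half of its
# columns (indices 1, 3, 5, ...) are fitted components, one weight each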
pathcl = filecl
tcl = np.loadtxt(pathcl, dtype=float, delimiter=',', usecols=(list(range(int(w_values)))), skiprows=1).T
f1 = open(pathdir + 'Confidence_limits_2nd.dat', 'w')
f1.close()
sh.rm(sh.glob(pathdir + 'Confidence_limits_2nd.dat'))
###################
# Creating random matrix and sorting
if w_values == 2:
c1, c2 = tcl[0][0], tcl[1][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 5000)
int2 = np.random.normal(c2, m, 5000)
list2 = list(range(len(int1)))
for i1, j1 in zip(list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and p >= 0. and p <= 500.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write('{0:f} {1:f} {2:f}\n'.format(int1[i1], int2[j1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
y = t[2]
delchi = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'y': y}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'y'])
df1.sort_values(by=['y'], inplace=True)
np.savetxt('Confidence_limits_2nd.dat', df1, fmt='%1.4f')
Data2 = {'w1': w1, 'w2': w2, 'delchi': delchi}
df2 = DataFrame(Data2, columns=['w1', 'w2', 'delchi'])
df2.sort_values(by=['delchi'], inplace=True)
np.savetxt('Confidence_limits_Delta_2nd.dat', df2, fmt='%1.4f')
# Statistical Plots
fig = plt.figure()
from ENIIGMA.Stats import Stats_contour as stc
file1 = pathdir + 'Confidence_limits_2nd.dat'
stc.st_plot_contour2(file1, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
from ENIIGMA.Stats import Stats_plot as stp
fig = plt.figure()
stp.min_max(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
fig = plt.figure()
file2 = pathdir + 'output_file.txt'
fileqmin = pathdir + 'q_min.txt'
fileqmax = pathdir + 'q_max.txt'
for fn in glob.glob(pathdir + "Merge*"): # Remove the Merge files
os.remove(fn)
stp.deconv_best(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
try:
fig = plt.figure()
from ENIIGMA.Stats import Merge_colden as mc
filename = pathdir + 'Column_density_*.csv'
mc.mergecd(filename, pathdir)
plt.savefig(pp, format='pdf')
except:
print('Analytical decomposition failed!')
elif w_values == 3:
c1, c2, c3 = tcl[0][0], tcl[1][0], tcl[2][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 1000)
int2 = np.random.normal(c2, m, 1000)
int3 = np.random.normal(c3, m, 1000)
list2 = list(range(len(int1)))
for i1, j1, k1 in zip(list2, list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3] + int3[k1] * dainp[:, 5]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and int3[k1] >= 0. and p >= 0. and p <= 1050.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write('{0:f} {1:f} {2:f} {3:f}\n'.format(int1[i1], int2[j1], int3[k1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
w3 = t[2]
y = t[3]
delchi = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'w3': w3, 'y': y}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'w3', 'y'])
df1.sort_values(by=['y'], inplace=True)
np.savetxt('Confidence_limits_2nd.dat', df1, fmt='%1.4f')
Data2 = {'w1': w1, 'w2': w2, 'w3': w3, 'delchi': delchi}
df2 = DataFrame(Data2, columns=['w1', 'w2', 'w3', 'delchi'])
df2.sort_values(by=['delchi'], inplace=True)
np.savetxt('Confidence_limits_Delta_2nd.dat', df2, fmt='%1.4f')
# Statistical Plots
fig = plt.figure()
from ENIIGMA.Stats import Stats_contour as stc
file1 = pathdir + 'Confidence_limits_2nd.dat'
stc.st_plot_contour3(file1, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
from ENIIGMA.Stats import Stats_plot as stp
fig = plt.figure()
stp.min_max(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
fig = plt.figure()
file2 = pathdir + 'output_file.txt'
fileqmin = pathdir + 'q_min.txt'
fileqmax = pathdir + 'q_max.txt'
for fn in glob.glob(pathdir + "Merge*"): # Remove the Merge files
os.remove(fn)
stp.deconv_best(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
try:
fig = plt.figure()
from ENIIGMA.Stats import Merge_colden as mc
filename = pathdir + 'Column_density_*.csv'
mc.mergecd(filename, pathdir)
plt.savefig(pp, format='pdf')
except:
print('Analytical decomposition failed!')
elif w_values == 4:
c1, c2, c3, c4 = tcl[0][0], tcl[1][0], tcl[2][0], tcl[3][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 1000)
int2 = np.random.normal(c2, m, 1000)
int3 = np.random.normal(c3, m, 1000)
int4 = np.random.normal(c4, m, 1000)
list2 = list(range(len(int1)))
for i1, j1, k1, l1 in zip(list2, list2, list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3] + int3[k1] * dainp[:, 5] + int4[l1] * dainp[:, 7]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and int3[k1] >= 0. and int4[l1] >= 0. and p >= 0. and p <= 500.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write('{0:f} {1:f} {2:f} {3:f} {4:f}\n'.format(int1[i1], int2[j1], int3[k1], int4[l1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
w3 = t[2]
w4 = t[3]
y = t[4]
delchi2 = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'y': y}
Data2 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'delchi2': delchi2}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'w3', 'w4', 'y'])
df2 = DataFrame(Data2, columns=['w1', 'w2', 'w3', 'w4', 'delchi2'])
df1.sort_values(by=['y'], inplace=True)
df2.sort_values(by=['delchi2'], inplace=True)
np.savetxt('Confidence_limits_2nd.dat', df1, fmt='%1.4f')
np.savetxt('Confidence_limits_Delta_2nd.dat', df2, fmt='%1.4f')
# Statistical Plots
fig = plt.figure()
from ENIIGMA.Stats import Stats_contour as stc
file1 = pathdir + 'Confidence_limits_2nd.dat'
stc.st_plot_contour4(file1, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
from ENIIGMA.Stats import Stats_plot as stp
fig = plt.figure()
stp.min_max(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
fig = plt.figure()
file2 = pathdir + 'output_file.txt'
fileqmin = pathdir + 'q_min.txt'
fileqmax = pathdir + 'q_max.txt'
for fn in glob.glob(pathdir + "Merge*"): # Remove the Merge files
os.remove(fn)
stp.deconv_best(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
try:
fig = plt.figure()
from ENIIGMA.Stats import Merge_colden as mc
filename = pathdir + 'Column_density_*.csv'
mc.mergecd(filename, pathdir)
plt.savefig(pp, format='pdf')
except:
print('Analytical decomposition failed!')
elif w_values == 5:
c1, c2, c3, c4, c5 = tcl[0][0], tcl[1][0], tcl[2][0], tcl[3][0], tcl[4][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 1000)
int2 = np.random.normal(c2, m, 1000)
int3 = np.random.normal(c3, m, 1000)
int4 = np.random.normal(c4, m, 1000)
int5 = np.random.normal(c5, m, 1000)
list2 = list(range(len(int1)))
for i1, j1, k1, l1, m1 in zip(list2, list2, list2, list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3] + int3[k1] * dainp[:, 5] + int4[l1] * dainp[:, 7] + \
int5[m1] * dainp[:, 9]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and int3[k1] >= 0. and int4[l1] >= 0. and int5[
m1] >= 0. and p >= 0. and p <= 1100.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write(
'{0:f} {1:f} {2:f} {3:f} {4:f} {5:f}\n'.format(int1[i1], int2[j1], int3[k1], int4[l1], int5[m1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
w3 = t[2]
w4 = t[3]
w5 = t[4]
y = t[5]
delchi2 = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'y': y}
Data2 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'delchi2': delchi2}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'y'])
df2 = DataFrame(Data2, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'delchi2'])
df1.sort_values(by=['y'], inplace=True)
df2.sort_values(by=['delchi2'], inplace=True)
np.savetxt('Confidence_limits_2nd.dat', df1, fmt='%1.4f')
np.savetxt('Confidence_limits_Delta_2nd.dat', df2, fmt='%1.4f')
# Statistical Plots
fig = plt.figure()
from ENIIGMA.Stats import Stats_contour as stc
file1 = pathdir + 'Confidence_limits_2nd.dat'
stc.st_plot_contour5(file1, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
from ENIIGMA.Stats import Stats_plot as stp
fig = plt.figure()
stp.min_max(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
fig = plt.figure()
file2 = pathdir + 'output_file.txt'
fileqmin = pathdir + 'q_min.txt'
fileqmax = pathdir + 'q_max.txt'
for fn in glob.glob(pathdir + "Merge*"): # Remove the Merge files
os.remove(fn)
stp.deconv_best(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
try:
fig = plt.figure()
from ENIIGMA.Stats import Merge_colden as mc
filename = pathdir + 'Column_density_*.csv'
mc.mergecd(filename, pathdir)
plt.savefig(pp, format='pdf')
except:
print('Analytical decomposition failed!')
elif w_values == 6:
c1, c2, c3, c4, c5, c6 = tcl[0][0], tcl[1][0], tcl[2][0], tcl[3][0], tcl[4][0], tcl[5][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 1000)
int2 = np.random.normal(c2, m, 1000)
int3 = np.random.normal(c3, m, 1000)
int4 = np.random.normal(c4, m, 1000)
int5 = np.random.normal(c5, m, 1000)
int6 = np.random.normal(c6, m, 1000)
list2 = list(range(len(int1)))
for i1, j1, k1, l1, m1, n1 in zip(list2, list2, list2, list2, list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3] + int3[k1] * dainp[:, 5] + int4[l1] * dainp[:, 7] + \
int5[m1] * dainp[:, 9] + int6[n1] * dainp[:, 11]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and int3[k1] >= 0. and int4[l1] >= 0. and int5[m1] >= 0. and int6[
n1] >= 0. and p >= 0. and p <= 1100.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write('{0:f} {1:f} {2:f} {3:f} {4:f} {5:f} {6:f}\n'.format(int1[i1], int2[j1], int3[k1], int4[l1],
int5[m1], int6[n1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
w3 = t[2]
w4 = t[3]
w5 = t[4]
w6 = t[5]
y = t[6]
delchi2 = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'w6': w6, 'y': y}
Data2 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'w6': w6, 'delchi2': delchi2}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'y'])
df2 = DataFrame(Data2, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'delchi2'])
df1.sort_values(by=['y'], inplace=True)
df2.sort_values(by=['delchi2'], inplace=True)
np.savetxt('Confidence_limits_2nd.dat', df1, fmt='%1.4f')
np.savetxt('Confidence_limits_Delta_2nd.dat', df2, fmt='%1.4f')
# Statistical Plots
fig = plt.figure()
from ENIIGMA.Stats import Stats_contour as stc
file1 = pathdir + 'Confidence_limits_2nd.dat'
stc.st_plot_contour6(file1, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
from ENIIGMA.Stats import Stats_plot as stp
fig = plt.figure()
stp.min_max(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
fig = plt.figure()
file2 = pathdir + 'output_file.txt'
fileqmin = pathdir + 'q_min.txt'
fileqmax = pathdir + 'q_max.txt'
for fn in glob.glob(pathdir + "Merge*"): # Remove the Merge files
os.remove(fn)
stp.deconv_best(xdata0, ydata0, e_min, e_max, pathdir)
plt.savefig(pp, format='pdf', bbox_inches='tight')
try:
fig = plt.figure()
from ENIIGMA.Stats import Merge_colden as mc
filename = pathdir + 'Column_density_*.csv'
mc.mergecd(filename, pathdir)
plt.savefig(pp, format='pdf')
except:
print('Analytical decomposition failed!')
elif w_values == 7:
c1, c2, c3, c4, c5, c6, c7 = tcl[0][0], tcl[1][0], tcl[2][0], tcl[3][0], tcl[4][0], tcl[5][0], tcl[6][0]
m = f_sig * np.std(err_y)
int1 = np.random.normal(c1, m, 2000)
int2 = np.random.normal(c2, m, 2000)
int3 = np.random.normal(c3, m, 2000)
int4 = np.random.normal(c4, m, 2000)
int5 = np.random.normal(c5, m, 2000)
int6 = np.random.normal(c6, m, 2000)
int7 = np.random.normal(c7, m, 2000)
list2 = list(range(len(int1)))
for i1, j1, k1, l1, m1, n1, o1 in zip(list2, list2, list2, list2, list2, list2, list2):
f1 = int1[i1] * dainp[:, 1] + int2[j1] * dainp[:, 3] + int3[k1] * dainp[:, 5] + int4[l1] * dainp[:, 7] + \
int5[m1] * dainp[:, 9] + int6[n1] * dainp[:, 11] + int7[o1] * dainp[:, 13]
p = (1. / (len(ydata0) - 1. - w_values)) * np.sum(((ydata0 - f1) / err_y) ** 2)
if int1[i1] >= 0. and int2[j1] >= 0. and int3[k1] >= 0. and int4[l1] >= 0. and int5[m1] >= 0. and int6[
n1] >= 0. and int7[o1] >= 0. and p >= 0. and p <= 1058.:
fp = open('Confidence_limits_2nd.dat', 'a')
fp.write(
'{0:f} {1:f} {2:f} {3:f} {4:f} {5:f} {6:f} {7:f}\n'.format(int1[i1], int2[j1], int3[k1], int4[l1],
int5[m1], int6[n1], int7[o1], p))
fp.close()
path = pathdir + 'Confidence_limits_2nd.dat'
t = pd.read_csv(path, sep='\s+', header=None) # astropy.io.ascii.read(path, format='no_header')
w1 = t[0]
w2 = t[1]
w3 = t[2]
w4 = t[3]
w5 = t[4]
w6 = t[5]
w7 = t[6]
y = t[7]
delchi2 = y - min(y)
Data1 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'w6': w6, 'w7': w7, 'y': y}
Data2 = {'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'w6': w6, 'w7': w7, 'delchi2': delchi2}
df1 = DataFrame(Data1, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'y'])
df2 = | DataFrame(Data2, columns=['w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'delchi2']) | pandas.DataFrame |
"""Tests for the SQLite DatabaseManager `SQLiteDb`.
Tests all methods of the DatabaseManager because it is easy to test with SQLite.
"""
# =================================================
# Imports
# =================================================
# Standard Library
from pathlib import Path
# Third Party
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from sqlalchemy.sql import text
import sqlalchemy
# Local
import pandemy
from .dependencies import PANDAS_VERSION
# =================================================
# Setup
# =================================================
class SQLiteSQLContainer(pandemy.SQLContainer):
r"""A correctly defined pandemy.SQLContainer subclass"""
my_query = 'SELECT * FROM MyTable;'
class SQLiteFakeSQLContainer:
r"""
SQLContainer class that does not inherit from `pandemy.SQLContainer`.
This class is not a valid input to the container parameter of
`pandemy.DatabaseManager`.
"""
my_query = 'SELECT * FROM MyTable;'
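# Hedged illustration (not a test and never invoked): how a SQLContainer subclass is
# meant to travel with a SQLiteDb instance. `MyTable` does not exist in the test
# database, so actually executing `my_query` would raise pandemy.ExecuteStatementError.
def _example_container_usage():
    db = pandemy.SQLiteDb(file=':memory:', container=SQLiteSQLContainer)
    with db.engine.connect() as conn:
        return db.execute(sql=db.container.my_query, conn=conn)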
# =================================================
# Tests
# =================================================
class TestInitSQLiteDb:
r"""Test the initalization of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_file : Path
Path to a SQLite database that exists on disk.
"""
def test_all_defaults(self):
r"""Create an instance of SQLiteDb that lives in memory with all default values."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb()
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.container is None
assert db.engine_config is None
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
def test_in_memory(self):
r"""Create an instance of SQLiteDb that lives in memory."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=':memory:')
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.parametrize('file_as_str', [pytest.param(True, id='str'), pytest.param(False, id='Path')])
def test_file_must_exist(self, file_as_str, sqlite_db_file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file exists on disk.
Parameters
----------
file_as_str : bool
True if the file should be supplied as a string and False for pathlib.Path.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
if file_as_str:
db = pandemy.SQLiteDb(file=str(sqlite_db_file), must_exist=True)
else:
db = pandemy.SQLiteDb(file=sqlite_db_file, must_exist=True)
# Verify
# ===========================================================
assert db.file == sqlite_db_file
assert db.must_exist is True
assert db.conn_str == fr'sqlite:///{str(sqlite_db_file)}'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('file', [pytest.param('does not exist', id='str'),
pytest.param(Path('does not exist'), id='Path')])
def test_on_file_must_exist_file_does_not_exist(self, file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file does not exist on disk.
pandemy.DatabaseFileNotFoundError is expected to be raised.
Parameters
----------
file : str or Path
The file with the SQLite database.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.DatabaseFileNotFoundError):
pandemy.SQLiteDb(file=file, must_exist=True)
# Clean up - None
# ===========================================================
def test_on_file_with_SQLContainer(self):
r"""Create an instance with a SQLContainer class.
The option `must_exist` is set to False.
The file does not exists on disk.
"""
# Setup
# ===========================================================
must_exist = False
file = 'mydb.db'
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=file, must_exist=must_exist, container=SQLiteSQLContainer)
# Verify
# ===========================================================
assert db.file == Path(file)
assert db.must_exist is must_exist
assert db.container is SQLiteSQLContainer
# Clean up - None
# ===========================================================
# file, must_exist, container, engine_config, error_msg
input_test_bad_input = [
pytest.param(42, False, None, None, 'Received: 42', id='file=42'),
pytest.param('my_db.db', 'False', None, {'encoding': 'UTF-8'}, 'Received: False', id="must_exist='False'"),
pytest.param('my_db.db', False, [42], None, 'container must be a subclass of pandemy.SQLContainer',
id="container=[42]"),
pytest.param(Path('my_db.db'), False, SQLiteFakeSQLContainer, None,
'container must be a subclass of pandemy.SQLContainer', id="container=FakeSQLContainer"),
pytest.param('my_db.db', False, None, 42, 'engine_config must be a dict', id="engine_config=42"),
]
@pytest.mark.raises
@pytest.mark.parametrize('file, must_exist, container, engine_config, error_msg', input_test_bad_input)
def test_bad_input_parameters(self, file, must_exist, container, engine_config, error_msg):
r"""Test bad input parameters.
pandemy.InvalidInputError is expected to be raised.
Parameters
----------
file : str or Path, default ':memory:'
The file (with path) to the SQLite database.
The default creates an in memory database.
must_exist : bool, default False
If True validate that file exists unless file = ':memory:'.
If it does not exist pandemy.DatabaseFileNotFoundError is raised.
If False the validation is omitted.
container : pandemy.SQLContainer or None, default None
A container of database statements that the SQLite DatabaseManager can use.
engine_config : dict or None
Additional keyword arguments passed to the SQLAlchemy create_engine function.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match=error_msg):
pandemy.SQLiteDb(file=file, must_exist=must_exist, container=container, engine_config=engine_config)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_parameter_to_create_engine(self):
r"""Test to supply an invalid parameter to the SQLAlchemy create_engine function.
pandemy.CreateEngineError is expected to be raised.
Also supply a keyword argument that is not used for anything.
It should not affect the initialization.
"""
# Setup
# ===========================================================
error_msg = 'invalid_param'
engine_config = {'invalid_param': True}
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.CreateEngineError, match=error_msg):
pandemy.SQLiteDb(file='my_db.db', must_exist=False, container=None,
engine_config=engine_config, kwarg='kwarg')
# Clean up - None
# ===========================================================
class TestExecuteMethod:
r"""Test the `execute` method of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_owner : pd.DataFrame
The owner table of the test database.
"""
# The query for test_select_all_owners
select_all_owners = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
@pytest.mark.parametrize('query', [pytest.param(select_all_owners, id='query: str'),
pytest.param(text(select_all_owners), id='query: sqlalchemy TextClause')])
def test_select_all_owners(self, query, sqlite_db, df_owner):
r"""Test to execute a SELECT query.
Query all rows from the Owner table.
Parameters
----------
query : str or text
The SQL query to execute.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn)
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_owner.index[idx]
assert row.OwnerName == df_owner.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_owner.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
# The query for test_select_owner_by_id
select_owner_by_id = """SELECT OwnerId, OwnerName
FROM Owner
WHERE OwnerId = :id;
"""
# query, id, owner_exp
input_test_select_owner_by_id = [pytest.param(select_owner_by_id, 1,
id='query: str, id=1'),
pytest.param(text(select_owner_by_id), 2,
id='query: sqlalchemy TextClause, id=2')]
@pytest.mark.parametrize('query, owner_id', input_test_select_owner_by_id)
def test_select_owner_by_id(self, query, owner_id, sqlite_db, df_owner):
r"""Test to execute a SELECT query with a query parameter.
Parameters
----------
query : str or sqlalchemy.sql.elements.TextClause
The SQL query to execute.
owner_id : int
The parameter representing OwnerId in `query`.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'id': owner_id})
# Verify
# ===========================================================
for row in result:
assert row.OwnerId == owner_id
assert row.OwnerName == df_owner.loc[owner_id, 'OwnerName']
# Clean up - None
# ===========================================================
def test_select_owner_by_2_params(self, sqlite_db, df_owner):
r"""Test to execute a SELECT query with 2 query parameters."""
# Setup
# ===========================================================
query = text("""SELECT OwnerId, OwnerName, BirthDate
FROM Owner
WHERE OwnerName = :name OR
DATE(BirthDate) > DATE(:bdate)
ORDER BY OwnerName ASC;
""")
df_exp_result = df_owner.loc[[3, 1], :]
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'name': 'John', 'bdate': '1941-12-07'})
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_exp_result.index[idx]
assert row.OwnerName == df_exp_result.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_exp_result.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
input_test_insert_owner = [
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'}], id='1 Owner'),
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'},
{'id': 2, 'name': '<NAME>', 'bdate': '1987-07-21'}], id='2 Owners'),
]
@pytest.mark.parametrize('params', input_test_insert_owner)
def test_insert_into_owner(self, params, sqlite_db_empty):
r"""Test to insert new owner(s) into the Owner table of the empty test database.
Parameters
----------
params : list of dict
The parameters to pass to the insert statement.
"""
# Setup
# ===========================================================
statement = text("""INSERT INTO Owner (OwnerId, OwnerName, BirthDate)
VALUES (:id, :name, :bdate);
""")
# The query to read back the inserted owners
query_exp = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
with sqlite_db_empty.engine.connect() as conn:
# Exercise
# ===========================================================
sqlite_db_empty.execute(sql=statement, conn=conn, params=params)
# Verify
# ===========================================================
result = sqlite_db_empty.execute(sql=query_exp, conn=conn)
for idx, row in enumerate(result):
assert row.OwnerId == params[idx]['id']
assert row.OwnerName == params[idx]['name']
assert row.BirthDate == params[idx]['bdate']
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_select_syntax(self, sqlite_db):
r"""Execute a SELECT query with invalid syntax.
No query parameters are supplied. It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
query = 'SELE * FROM Owner'
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=query, conn=conn)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_query_param(self, sqlite_db):
r"""
Execute a SELECT query with a parameter (:id) where the name of the supplied
parameter (:di) does not match the parameter name in the query.
It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=self.select_owner_by_id, conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_sql_param(self, sqlite_db):
r"""Supply and invalid type to the `sql` parameter.
It should raise pandemy.InvalidInputError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match='list'):
sqlite_db.execute(sql=['Invalid query'], conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
class TestIsValidTableName:
r"""Test the `_is_valid_table_name` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
"""
@pytest.mark.parametrize('table', [pytest.param('Customer', id='Customer'),
pytest.param('1', id='1'),
pytest.param('', id='empty string'),
pytest.param('DELETE', id='DELETE'),
pytest.param('"DROP"', id='DROP'),
pytest.param('""DELETEFROMTABLE""', id='""DELETEFROMTABLE""')])
def test_is_valid_table_name_valid_table_names(self, table, sqlite_db_empty):
r"""Test that valid table names can pass the validation.
The `_is_valid_table_name` method checks that the table name consists
of a single word. If the table name is valid the method returns None
and no exception should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
result = sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert result is None
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table, spaces', [pytest.param('Customer DELETE', '1',
id='2 words, 1 space'),
pytest.param(' Customer DELETE', '3',
id='2 words, 3 spaces'),
pytest.param('"DROP TABLE Customer"', '2',
id='3 words, 2 spaces'),
pytest.param(';""DELETE FROM TABLE Customer;"', '3',
id='4 words, 3 spaces')])
def test_is_valid_table_name_invalid_table_names(self, table, spaces, sqlite_db_empty):
r"""Test that invalid table names can be detected correctly.
The `_is_valid_table_name` method checks that the table name consists
of a single word.
pandemy.InvalidTableNameError is expected to be raised
if the table name is invalid.
Parameters
----------
table : str
The name of the table.
spaces : str
The number of space characters in `table`.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert spaces in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param(1, id='int'),
pytest.param(3.14, id='float'),
pytest.param([1, '1'], id='list'),
pytest.param({'table': 'name'}, id='dict')])
def test_is_valid_table_name_invalid_input(self, table, sqlite_db_empty):
r"""Test invalid input to the `table` parameter.
If `table` is not a string pandemy.InvalidInputError should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidInputError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidInputError
assert str(table) in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestDeleteAllRecordsFromTable:
r"""Test the `delete_all_records_from_table` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
def test_delete_all_records(self, sqlite_db_empty, df_customer):
r"""Delete all records from the table Customer in the test database."""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
df_exp_result = pd.DataFrame(columns=df_customer.columns)
df_exp_result.index.name = df_customer.index.name
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table='Customer', conn=conn)
# Verify
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_exp_result, check_dtype=False, check_index_type=False)
@pytest.mark.raises
def test_delete_all_records_table_does_not_exist(self, sqlite_db_empty):
r"""Try to delete all records from the table Custom that does not exist in the database.
pandemy.DeleteFromTableError is expected to be raised.
"""
# Setup
# ===========================================================
table = 'Custom'
# Exercise
# ===========================================================
with pytest.raises(pandemy.DeleteFromTableError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.DeleteFromTableError
assert table in exc_info.value.args[0]
assert table in exc_info.value.data[0]
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param('Customer DELETE', id='table name = 2 words'),
pytest.param('"DROP TABLE Customer"', id='table name = 3 words'),
pytest.param(';""DELETE FROM TABLE Customer;"', id='table name = 4 words')])
def test_delete_all_records_invalid_table_name(self, table, sqlite_db_empty):
r"""Try to delete all records from specified table when supplying and invalid table name.
pandemy.InvalidTableNameError is expected to be raised.
Parameters
----------
table: str
The name of the table to delete records from.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestSaveDfMethod:
r"""Test the `save_df` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
@pytest.mark.parametrize('chunksize', [pytest.param(None, id='chunksize=None'),
pytest.param(2, id='chunksize=2')])
def test_save_to_existing_empty_table(self, chunksize, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng empty table.
Parameters
----------
chunksize : int or None
The number of rows in each batch to be written at a time.
If None, all rows will be written at once.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn,
if_exists='append', chunksize=chunksize)
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
# Clean up - None
# ===========================================================
def test_save_to_new_table_with_schema(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to a new table in the database with a schema specified.
The table Customer already exists as an empty table in the database. Saving the DataFrame
to a temporary table (the temp schema) also called Customer, with the parameter `if_exists='fail'`,
should not raise an exception since the two tables called Customer exist in different schemas.
SQLite supports the schemas 'temp', 'main' or the name of an attached database.
See Also
--------
https://sqlite.org/lang_createtable.html
"""
# Setup
# ===========================================================
schema = 'temp'
query = f"""SELECT * FROM {schema}.Customer;"""
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn,
schema=schema, if_exists='fail')
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
# Clean up - None
# ===========================================================
def test_save_to_existing_non_empty_table_if_exists_replace(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng non empty table.
The existing rows in the table are deleted before writing the DataFrame.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise
# ===========================================================
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn, if_exists='replace')
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
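# Worked example (illustrative): for a DataFrame-based subclass _ndim is 2, so
# _construct(4, value=1) builds a 4x4 frame filled with ones, while
# _construct(4, value='empty') drops the info axis kwarg and builds an empty object.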
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 2 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.neighbors import NearestNeighbors
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed -- during nn finetuning
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
import sys
'''
CustomDataset object takes care of supplying an observation (image, labels).
It also performs image preprocessing, such as normalization by color channel.
In case of training, it also performs random transformations, such as horizontal flips, resized crops, rotations, and color jitter.
'''
class CustomDataset(Dataset):
def __init__(self, data, tr = True):
self.data = data
self.paths = self.data['img_path'].values.astype('str')
self.data_len = self.data.shape[0]
self.labels = self.data[q_list].values.astype('int32')
self.control_metrics = self.data[control_list].values.astype('float32')
# transforms
if tr:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.1,contrast=0.1,saturation=0.1,hue=0.1)], p=0.75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
def __getitem__(self, index):
img_path = PATH + '/'+ self.paths[index]
img = Image.open(img_path)
img_tensor = self.transforms(img)
label = self.labels[index]
control_metric = self.control_metrics[index]
return (img_tensor, label, control_metric)
def __len__(self):
return self.data_len
#get pretrained resnet50 model
def get_pretrained():
model = models.resnet50(pretrained=True)
return model
# replace last layer
def prepare_for_finetuning(model):
for param in model.parameters():
param.requires_grad = False
param.requires_grad = True
#replacing last layer with new fully connected
model.fc = torch.nn.Linear(model.fc.in_features,n_outs)
return
# create an object that uses CustomDataset object from above to load multiple observations in parallel
def create_dataloader(data,rand=True):
if rand: # shuffle observations
dataset = CustomDataset(data, tr=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=10, drop_last=False)
else: # load in fixed order of data
dataset = CustomDataset(data, tr=False)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SequentialSampler(dataset), num_workers=10, drop_last=False)
return loader
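# Usage sketch (illustrative; assumes `data_train`/`data_test` are pandas DataFrames with an
# 'img_path' column plus the q_list and control_list columns used by CustomDataset):
#   loader_train = create_dataloader(data_train, rand=True)   # shuffled, with augmentation
#   loader_test = create_dataloader(data_test, rand=False)    # fixed order, no augmentation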
#finetune and save neural net model
def finetune_and_save(loader_train, loader_test):
# loading pretrained model and preparing it for finetuning
model = get_pretrained()
prepare_for_finetuning(model)
if CUDA:
model.cuda()
# optimize only last six layers
layers = list(model.children())
params = list(layers[len(layers)-1].parameters())+list(layers[len(layers)-2].parameters())+list(layers[len(layers)-3].parameters())+list(layers[len(layers)-4].parameters())+list(layers[len(layers)-5].parameters())+list(layers[len(layers)-6].parameters())
optimizer = optim.Adamax(params=params, lr=0.001)
# print("starting finetuning")
hist = {}
hist['d_labs'] = q_list
hist['train_loss'] = []
hist['val_loss'] = []
hist['train_loss_d'] = []
hist['val_loss_d'] = []
hist['train_auc_d'] = []
hist['val_auc_d'] = []
acc_best = 0.0
#train
for epoch in range(N_EPOCHS):
train_loss, train_loss_d, train_auc_d = run_epoch(model, loss_f, optimizer, loader_train, update_model = True) # training
eval_loss, eval_loss_d, eval_auc_d = run_epoch(model, loss_f, optimizer, loader_test, update_model = False) # evaluation
hist['train_loss'].append(train_loss)
hist['val_loss'].append(eval_loss)
hist['train_loss_d'].append(train_loss_d)
hist['val_loss_d'].append(eval_loss_d)
hist['train_auc_d'].append(train_auc_d)
hist['val_auc_d'].append(eval_auc_d)
with open(RESULTS+'/eval_record.json', 'w') as fjson:
json.dump(hist, fjson)
# saving model
torch.save(model, RESULTS+"/finetuned_model")
return
# function that performa training (or evaluation) over an epoch (full pass through a data set)
def run_epoch(model, loss_f, optimizer, loader, update_model = False):
if update_model:
model.train()
else:
model.eval()
loss_hist = []
loss_hist_detailed = []
auc_hist_detailed = []
for batch_i, var in tqdm(enumerate(loader)):
loss, loss_detailed, auc_detailed = loss_f(model, var)
if update_model:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist.append(loss.data.item())
loss_hist_detailed.append(loss_detailed)
auc_hist_detailed.append(auc_detailed)
loss_detailed = pd.DataFrame(loss_hist_detailed)
loss_detailed.columns = q_list
auc_detailed = pd.DataFrame(auc_hist_detailed)
auc_detailed.columns = q_list
return np.mean(loss_hist).item(), loss_detailed.mean(0).values.tolist(), auc_detailed.mean(0).values.tolist()
# function to compute loss from a batch data
def loss_f(model, var):
data, target, _ = var
data, target = Variable(data), Variable(target)
if CUDA:
data, target = data.cuda(), target.cuda()
output = model(data) # scores for each level of every predicted variable
loss = 0
loss_detailed = []
auc_detailed = []
for i in range(len(q_d_list)):
w = torch.FloatTensor(class_weights[i])
if CUDA:
w = w.cuda()
# output contains scores for each level of every predicted variable
# q_d_list[i] is number of levels to variable i
# q_d_list_cumsum[i] is a cumulative sum over number of levels for variable i and all variables before it
# all variables ordered as in q_list
# (q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i] then gives exact coordinates of the scores for variable i
# among all scores in the output
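# Worked example (hypothetical sizes): if q_d_list = [2, 3, 2] then q_d_list_cumsum = [2, 5, 7],
# so variable i=0 uses output columns 0:2, i=1 uses columns 2:5 and i=2 uses columns 5:7.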
temp = F.cross_entropy(output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]], target[:,i].long(), weight=w)
loss_detailed.append(temp.data.item())
loss += temp
# now we calculate AUC
y_true = target[:,i].detach().cpu().numpy()
y_score = output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]].detach().cpu().numpy()[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc_detailed.append(metrics.auc(fpr, tpr))
return loss, loss_detailed, auc_detailed
# building class balancing weights as in
# https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
def calculate_class_weights(X):
class_weights = []
for i in q_list:
class_weights.append(
class_weight.compute_class_weight('balanced', np.unique(X[i].values), X[i].values))
return class_weights
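# Rough illustration (assumed 100 observations with a 90/10 class split): 'balanced' weights are
# n_samples / (n_classes * n_per_class), i.e. about [0.56, 5.0], so the minority class is
# up-weighted when these weights are passed to F.cross_entropy in loss_f above.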
# extract data from a dataloader as a set of image features X and set of labels y, corresponding to those image features
# can also blackout specified areas of the loaded images before extracting the image features -- this is used in our experiments
# when the data loader is deterministic, it will load the same data in the same order on every pass
def extract_data(loader, modelred, blackout=None):
X = []
y = []
z = []
for batch_i, var in tqdm(enumerate(loader)):
data, target, control_metrics = var
if blackout is not None:
data[:, :, blackout[0]:blackout[1], blackout[2]:blackout[3]] = 0.0
data, target, control_metrics = Variable(data), Variable(target), Variable(control_metrics)
if CUDA:
data, target, control_metrics = data.cuda(), target.cuda(), control_metrics.cuda()
data_out = modelred(data)
X.append(data_out.detach().cpu().numpy())
y.append(target.detach().cpu().numpy())
z.append(control_metrics.detach().cpu().numpy())
X = np.vstack(X).squeeze()
y = np.vstack(y)
z = np.vstack(z)
return X, y, z
# function to evaluate a set of trained classifier using AUC metric
# 'models' contains classifiers in order of binary variables to be predicted -- which are contaiend in Y
# X is a matrix of covariates
def analytics_lin(models, X, Y):
acc = {}
auc = {}
for i in tqdm(range(Y.shape[1])):
y_true = Y[:,i]
mod = models[i]
y_pred = np.argmax(mod.predict_proba(X),axis=1)
# auc
y_prob = mod.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_prob)
auc[q_list[i]] = metrics.auc(fpr, tpr)
return auc
# sequentially yield coordinates for blackout in an image
def sliding_window(image_shape, stepSize, windowSize):
# slide a window across the image
for yc in range(0, image_shape[0], stepSize):
for xc in range(0, image_shape[1], stepSize):
# yield the current window
yield (yc, yc + windowSize[1], xc, xc + windowSize[0])
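# Example: with image_shape=(224, 224), stepSize=28 and windowSize=(28, 28) (the values used
# below), the first windows are (0, 28, 0, 28), (0, 28, 28, 56), ... giving an 8x8 grid of
# non-overlapping 28x28 patches.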
# calculating decrease in AUC when blocking a particular area of an image -- over 8x8 grid placed over the image
def img_area_importance(modelred, models, svd, dat, auc_true):
patch_importance = {}
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
loader = create_dataloader(dat,rand=False)
# X_modified_raw contains image features extracted from images with a portion of the image blocked
X_modified_raw, Y, _ = extract_data(loader, modelred, (y0, y1, x0, x1))
# image features reduced to 500 via svd
X_modified = svd.transform(X_modified_raw)
auc = analytics_lin(models, X_modified, Y)
patch_importance_q = {} # contains -(decrease in auc after blocking of an image)
for q in q_list:
patch_importance_q[q] = auc_true[q] - auc[q]
patch_importance[(y0, y1, x0, x1)] = patch_importance_q # decrease in auc across all variables -- for the given blocked portion of the image
return patch_importance
# START OF THE RUN
torch.set_num_threads(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
N_EPOCHS = 20
FINETUNE = True
CUDA = torch.cuda.is_available()
batch_size=10
PATH = './data'
# analysis on face vs. bodies
CASHIER = sys.argv[1]#'ALL' #'4' # 3 #
control_list = ['02.05','03.05','04.05','05.05','06.05','07.05','08.05','09.05','10.05', '11.05', '12.05', '13.05',
'time_1', 'time_2', 'time_3', 'time_4']
if CASHIER == 'ALL':
data = pd.read_csv(PATH+'/data_face.csv')
RESULTS = './results_face'
control_list = control_list + ['cashier4']
elif CASHIER == '4':
data = pd.read_csv(PATH+'/data_face.csv')
import pandas as pd
import numpy as np2
def build(args):
# Get medians
def get_medians(df_p, last):
df_res = df_p.iloc[-last:].groupby(["param"]).median().reset_index()["median"][0]
return df_res
def medians_params(df_list, age_group, last):
params_def = ["age", "beta", "IFR", "RecPeriod", "alpha", "sigma"]
params_val = [
age_group,
get_medians(df_list[0], last),
get_medians(df_list[1], last),
get_medians(df_list[2], last),
get_medians(df_list[3], last),
get_medians(df_list[4], last),
]
res = dict(zip(params_def, params_val))
return res
params_data_BOG = pd.read_csv(args.params_data_path, encoding="unicode_escape", delimiter=",")
# Ages 0-19
young_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "0-19"])
young_ages_beta = pd.DataFrame(young_ages_params[young_ages_params["param"] == "contact_rate"])
young_ages_IFR = pd.DataFrame(young_ages_params[young_ages_params["param"] == "IFR"])
young_ages_RecPeriod = pd.DataFrame(young_ages_params[young_ages_params["param"] == "recovery_period"])
young_ages_alpha = pd.DataFrame(young_ages_params[young_ages_params["param"] == "report_rate"])
young_ages_sigma = pd.DataFrame(young_ages_params[young_ages_params["param"] == "relative_asymp_transmission"])
young_params = [young_ages_beta, young_ages_IFR, young_ages_RecPeriod, young_ages_alpha, young_ages_sigma]
# Ages 20-39
youngAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "20-39"])
youngAdults_ages_beta = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "contact_rate"])
youngAdults_ages_IFR = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "IFR"])
youngAdults_ages_RecPeriod = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "recovery_period"]
)
youngAdults_ages_alpha = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "report_rate"])
youngAdults_ages_sigma = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "relative_asymp_transmission"]
)
youngAdults_params = [
youngAdults_ages_beta,
youngAdults_ages_IFR,
youngAdults_ages_RecPeriod,
youngAdults_ages_alpha,
youngAdults_ages_sigma,
]
# Ages 40-49
adults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "40-49"])
adults_ages_beta = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "contact_rate"])
adults_ages_IFR = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "IFR"])
adults_ages_RecPeriod = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "recovery_period"])
adults_ages_alpha = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "report_rate"])
adults_ages_sigma = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "relative_asymp_transmission"])
adults_params = [adults_ages_beta, adults_ages_IFR, adults_ages_RecPeriod, adults_ages_alpha, adults_ages_sigma]
# Ages 50-59
seniorAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "50-59"])
seniorAdults_ages_beta = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "contact_rate"])
seniorAdults_ages_IFR = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "IFR"])
seniorAdults_ages_RecPeriod = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "recovery_period"]
)
seniorAdults_ages_alpha = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "report_rate"])
seniorAdults_ages_sigma = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "relative_asymp_transmission"]
)
seniorAdults_params = [
seniorAdults_ages_beta,
seniorAdults_ages_IFR,
seniorAdults_ages_RecPeriod,
seniorAdults_ages_alpha,
seniorAdults_ages_sigma,
]
# Ages 60-69
senior_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "60-69"])
senior_ages_beta = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "contact_rate"])
senior_ages_IFR = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "IFR"])
senior_ages_RecPeriod = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "recovery_period"])
senior_ages_alpha = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "report_rate"])
from skfem import *
import numpy as np
from utils import solver_iter_krylov, solver_iter_pyamg, solver_iter_mgcg
from skfem.helpers import d, dd, ddd, dot, ddot, grad, dddot, prod
from scipy.sparse.linalg import LinearOperator, minres
from skfem.models.poisson import *
from skfem.assembly import BilinearForm, LinearForm
import datetime
import pandas as pd
import sys
import time
pi = np.pi
sin = np.sin
cos = np.cos
exp = np.exp
# parameters
tol = 1e-8
intorder = 5
solver_type = 'mgcg'
refine_time = 6
epsilon_range = 6
zero_ep = False
element_type = 'P2'
sigma = 5
penalty = False
example = 'ex1'
# end of parameters
save_path = 'log/' + example + '_' + element_type + '_' + ('pen' if penalty else 'nopen') + '_' +'{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# output to txt
class Logger(object):
def __init__(self, filename=save_path+'.txt', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'a')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
sys.stdout = Logger(save_path+'.txt', sys.stdout)
# print parameters
print('=======Arguments=======')
print('example:\t{}'.format(example))
print('penalty:\t{}'.format(penalty))
print('element_type:\t{}'.format(element_type))
print('solver_type:\t{}'.format(solver_type))
print('tol:\t{}'.format(tol))
print('intorder:\t{}'.format(intorder))
print('refine_time:\t{}'.format(refine_time))
print('epsilon_range:\t{}'.format(epsilon_range))
print('sigma:\t{}'.format(sigma))
print('save_path:\t{}'.format(save_path))
print('=======Results=======')
# functions
def easy_boundary_penalty(basis):
'''
Input basis
----------------
Return D for boundary conditions
'''
dofs = basis.find_dofs({
'left': m.facets_satisfying(lambda x: x[0] == 0),
'right': m.facets_satisfying(lambda x: x[0] == 1),
'top': m.facets_satisfying(lambda x: x[1] == 1),
'buttom': m.facets_satisfying(lambda x: x[1] == 0)
})
D = np.concatenate((dofs['left'].nodal['u'], dofs['right'].nodal['u'],
dofs['top'].nodal['u'], dofs['buttom'].nodal['u']))
return D
def easy_boundary(basis):
'''
Input basis
----------------
Return D for boundary conditions
'''
dofs = basis.find_dofs({
'left': m.facets_satisfying(lambda x: x[0] == 0),
'right': m.facets_satisfying(lambda x: x[0] == 1),
'top': m.facets_satisfying(lambda x: x[1] == 1),
'buttom': m.facets_satisfying(lambda x: x[1] == 0)
})
D = np.concatenate((dofs['left'].nodal['u'], dofs['right'].nodal['u'],
dofs['top'].nodal['u'], dofs['buttom'].nodal['u'],
dofs['left'].facet['u_n'], dofs['right'].facet['u_n'],
dofs['top'].facet['u_n'], dofs['buttom'].facet['u_n']))
return D
@Functional
def L2pnvError(w):
return (w.h * dot(w['n'].value, w['w'].grad))**2
@BilinearForm
def a_load(u, v, w):
'''
for $a_{h}$
'''
return ddot(dd(u), dd(v))
@BilinearForm
def b_load(u, v, w):
'''
for $b_{h}$
'''
return dot(grad(u), grad(v))
@BilinearForm
def wv_load(u, v, w):
'''
for $(\nabla \chi_{h}, \nabla_{h} v_{h})$
'''
return dot(grad(u), grad(v))
@BilinearForm
def penalty_1(u, v, w):
return ddot(-dd(u), prod(w.n, w.n)) * dot(grad(v), w.n)
@BilinearForm
def penalty_2(u, v, w):
return ddot(-dd(v), prod(w.n, w.n)) * dot(grad(u), w.n)
@BilinearForm
def penalty_3(u, v, w):
return (sigma / w.h) * dot(grad(u), w.n) * dot(grad(v), w.n)
@BilinearForm
def laplace(u, v, w):
'''
for $(\nabla w_{h}, \nabla \chi_{h})$
'''
return dot(grad(u), grad(v))
@Functional
def L2uError(w):
x, y = w.x
return (w.w - exact_u(x, y))**2
def get_DuError(basis, u):
duh = basis.interpolate(u).grad
x = basis.global_coordinates().value
dx = basis.dx # quadrature weights
dux, duy = dexact_u(x[0], x[1])
return np.sqrt(np.sum(((duh[0] - dux)**2 + (duh[1] - duy)**2) * dx))
def get_D2uError(basis, u):
dduh = basis.interpolate(u).hess
x = basis.global_coordinates(
).value # coordinates of quadrature points [x, y]
dx = basis.dx # quadrature weights
duxx, duxy, duyx, duyy = ddexact(x[0], x[1])
return np.sqrt(
np.sum(((dduh[0][0] - duxx)**2 + (dduh[0][1] - duxy)**2 +
(dduh[1][1] - duyy)**2 + (dduh[1][0] - duyx)**2) * dx))
def solve_problem1(m, element_type='P1', solver_type='pcg'):
'''
switching to mgcg solver for problem 1
'''
if element_type == 'P1':
element = {'w': ElementTriP1(), 'u': ElementTriMorley()}
elif element_type == 'P2':
element = {'w': ElementTriP2(), 'u': ElementTriMorley()}
else:
raise Exception("Element not supported")
basis = {
variable: InteriorBasis(m, e, intorder=intorder)
for variable, e in element.items()
} # intorder: integration order for quadrature
K1 = asm(laplace, basis['w'])
f1 = asm(f_load, basis['w'])
if solver_type == 'amg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_pyamg(tol=tol))
elif solver_type == 'pcg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_krylov(Precondition=True, tol=tol))
elif solver_type == 'mgcg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_mgcg(tol=tol))
else:
raise Exception("Solver not supported")
K2 = epsilon**2 * asm(a_load, basis['u']) + asm(b_load, basis['u'])
f2 = asm(wv_load, basis['w'], basis['u']) * wh
if solver_type == 'amg':
uh0 = solve(*condense(K2, f2, D=easy_boundary(basis['u'])), solver=solver_iter_pyamg(tol=tol))
elif solver_type == 'pcg':
uh0 = solve(*condense(K2, f2, D=easy_boundary(basis['u'])), solver=solver_iter_krylov(Precondition=True, tol=tol))
elif solver_type == 'mgcg':
uh0 = solve(*condense(K2, f2, D=easy_boundary(basis['u'])), solver=solver_iter_mgcg(tol=tol))
else:
raise Exception("Solver not supported")
return uh0, basis
def solve_problem2(m, element_type='P1', solver_type='pcg'):
'''
adding mgcg solver for problem 2
'''
if element_type == 'P1':
element = {'w': ElementTriP1(), 'u': ElementTriMorley()}
elif element_type == 'P2':
element = {'w': ElementTriP2(), 'u': ElementTriMorley()}
else:
raise Exception("The element not supported")
basis = {
variable: InteriorBasis(m, e, intorder=intorder)
for variable, e in element.items()
}
K1 = asm(laplace, basis['w'])
f1 = asm(f_load, basis['w'])
if solver_type == 'amg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_pyamg(tol=tol))
elif solver_type == 'pcg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_krylov(Precondition=True, tol=tol))
elif solver_type == 'mgcg':
wh = solve(*condense(K1, f1, D=basis['w'].find_dofs()), solver=solver_iter_mgcg(tol=tol))
else:
raise Exception("Solver not supported")
fbasis = FacetBasis(m, element['u'])
p1 = asm(penalty_1, fbasis)
p2 = asm(penalty_2, fbasis)
p3 = asm(penalty_3, fbasis)
P = p1 + p2 + p3
K2 = epsilon**2 * asm(a_load, basis['u']) + epsilon**2 * P + asm(b_load, basis['u'])
f2 = asm(wv_load, basis['w'], basis['u']) * wh
if solver_type == 'amg':
uh0 = solve(*condense(K2, f2, D=easy_boundary_penalty(basis['u'])), solver=solver_iter_pyamg(tol=tol))
elif solver_type == 'pcg':
uh0 = solve(*condense(K2, f2, D=easy_boundary_penalty(basis['u'])), solver=solver_iter_krylov(Precondition=True, tol=tol))
elif solver_type == 'mgcg':
uh0 = solve(*condense(K2, f2, D=easy_boundary_penalty(basis['u'])), solver=solver_iter_mgcg(tol=tol))
else:
raise Exception("Solver not supported")
return uh0, basis, fbasis
if example == 'ex1':
@LinearForm
def f_load(v, w):
'''
for $(f, x_{h})$
'''
pix = pi * w.x[0]
piy = pi * w.x[1]
lu = 2 * (pi)**2 * (cos(2 * pix) * ((sin(piy))**2) + cos(2 * piy) *
((sin(pix))**2))
llu = -8 * (pi)**4 * (cos(2 * pix) * sin(piy)**2 + cos(2 * piy) *
sin(pix)**2 - cos(2 * pix) * cos(2 * piy))
return (epsilon**2 * llu - lu) * v
def exact_u(x, y):
return (sin(pi * x) * sin(pi * y))**2
def dexact_u(x, y):
dux = 2 * pi * cos(pi * x) * sin(pi * x) * sin(pi * y)**2
duy = 2 * pi * cos(pi * y) * sin(pi * x)**2 * sin(pi * y)
return dux, duy
def ddexact(x, y):
duxx = 2 * pi**2 * cos(pi * x)**2 * sin(pi * y)**2 - 2 * pi**2 * sin(
pi * x)**2 * sin(pi * y)**2
duxy = 2 * pi * cos(pi * x) * sin(pi * x) * 2 * pi * cos(pi * y) * sin(
pi * y)
duyx = duxy
duyy = 2 * pi**2 * cos(pi * y)**2 * sin(pi * x)**2 - 2 * pi**2 * sin(
pi * y)**2 * sin(pi * x)**2
return duxx, duxy, duyx, duyy
elif example == 'ex2':
@LinearForm
def f_load(v, w):
'''
for $(f, x_{h})$
'''
x = w.x[0]
y = w.x[1]
return (
(sin(pi * x) / 2 - (ep * pi * (exp(-x / ep) + exp(
(x - 1) / ep) - exp(-1 / ep) - 1)) / (2 * (exp(-1 / ep) - 1))) *
(12 * y + ep *
((exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep * (exp(-1 / ep) - 1) + 1))) / ep**2 + (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep**2)) -
((pi**2 * sin(pi * x)) / 2 + (ep * pi * (exp(-x / ep) / ep**2 + exp(
(x - 1) / ep) / ep**2)) / (2 * (exp(-1 / ep) - 1))) *
(ep * (exp((y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) - (3 * exp(-1 / ep) + 3) /
(exp(-1 / ep) - 1) - ((2 * y - 1) * (exp(-1 / ep) - 1)) /
(exp(-1 / ep) + 2 * ep * (exp(-1 / ep) - 1) + 1)) + 2 * y *
(y**2 - 1)) - ep**2 *
(((pi**4 * sin(pi * x)) / 2 - (ep * pi * (exp(-x / ep) / ep**4 + exp(
(x - 1) / ep) / ep**4)) / (2 * (exp(-1 / ep) - 1))) *
(ep * (exp((y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) - (3 * exp(-1 / ep) + 3) /
(exp(-1 / ep) - 1) - ((2 * y - 1) * (exp(-1 / ep) - 1)) /
(exp(-1 / ep) + 2 * ep * (exp(-1 / ep) - 1) + 1)) + 2 * y *
(y**2 - 1)) - 2 *
(12 * y + ep *
((exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep * (exp(-1 / ep) - 1) + 1))) / ep**2 + (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep**2)) *
((pi**2 * sin(pi * x)) / 2 + (ep * pi * (exp(-x / ep) / ep**2 + exp(
(x - 1) / ep) / ep**2)) / (2 * (exp(-1 / ep) - 1))) + ep *
(sin(pi * x) / 2 - (ep * pi * (exp(-x / ep) + exp(
(x - 1) / ep) - exp(-1 / ep) - 1)) / (2 * (exp(-1 / ep) - 1))) *
((exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep * (exp(-1 / ep) - 1) + 1))) / ep**4 + (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep**4))) * v
def exact_u(x, y):
return -(sin(pi * x) / 2 - (ep * pi * (exp(-x / ep) + exp(
(x - 1) / ep) - exp(-1 / ep) - 1)) /
(2 *
(exp(-1 / ep) - 1))) * (ep * (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) -
(3 * exp(-1 / ep) + 3) /
(exp(-1 / ep) - 1) -
((2 * y - 1) *
(exp(-1 / ep) - 1)) /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + 2 * y *
(y**2 - 1))
def dexact_u(x, y):
dux = -((pi * cos(pi * x)) / 2 + (ep * pi * (exp(-x / ep) / ep - exp(
(x - 1) / ep) / ep)) /
(2 *
(exp(-1 / ep) - 1))) * (ep * (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) -
(3 * exp(-1 / ep) + 3) /
(exp(-1 / ep) - 1) -
((2 * y - 1) * (exp(-1 / ep) - 1)) /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + 2 * y *
(y**2 - 1))
duy = (sin(pi * x) / 2 - (ep * pi * (exp(-x / ep) + exp(
(x - 1) / ep) - exp(-1 / ep) - 1)) /
(2 * (exp(-1 / ep) - 1))) * (ep * (
(2 * (exp(-1 / ep) - 1)) / (exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1) +
(exp(-y / ep) * (3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep -
(exp((y - 1) / ep) *
(3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep) - 6 * y**2 + 2)
return dux, duy
def ddexact(x, y):
duxx = ((pi**2 * sin(pi * x)) / 2 + (ep * pi * (exp(-x / ep) / ep**2 + exp(
(x - 1) / ep) / ep**2)) /
(2 *
(exp(-1 / ep) - 1))) * (ep * (exp(
(y - 1) / ep) * (3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) -
(3 * exp(-1 / ep) + 3) /
(exp(-1 / ep) - 1) -
((2 * y - 1) * (exp(-1 / ep) - 1)) /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1)) + 2 * y *
(y**2 - 1))
duxy = ((pi * cos(pi * x)) / 2 + (ep * pi * (exp(-x / ep) / ep - exp(
(x - 1) / ep) / ep)) / (2 * (exp(-1 / ep) - 1))) * (ep * (
(2 * (exp(-1 / ep) - 1)) / (exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1) +
(exp(-y / ep) * (3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep -
(exp((y - 1) / ep) *
(3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep) - 6 * y**2 + 2)
duyx = duxy
duyy = -(sin(pi * x) / 2 - (ep * pi * (exp(-x / ep) + exp(
(x - 1) / ep) - exp(-1 / ep) - 1)) /
(2 *
(exp(-1 / ep) - 1))) * (12 * y + ep *
((exp(-y / ep) *
(3 / (exp(-1 / ep) - 1) + 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep**2 +
(exp((y - 1) / ep) *
(3 / (exp(-1 / ep) - 1) - 1 /
(exp(-1 / ep) + 2 * ep *
(exp(-1 / ep) - 1) + 1))) / ep**2))
return duxx, duxy, duyx, duyy
elif example == 'ex3':
@LinearForm
def f_load(v, w):
pix = pi * w.x[0]
piy = pi * w.x[1]
return (2 * pi**2 * sin(pix) * sin(piy)) * v
def exact_u(x, y):
return sin(pi * x) * sin(pi * y)
def dexact_u(x, y):
dux = pi * cos(pi * x) * sin(pi * y)
duy = pi * cos(pi * y) * sin(pi * x)
return dux, duy
def ddexact(x, y):
duxx = -pi**2 * sin(pi * x) * sin(pi * y)
duxy = pi * cos(pi * x) * pi * cos(pi * y)
duyx = duxy
duyy = -pi**2 * sin(pi * y) * sin(pi * x)
return duxx, duxy, duyx, duyy
else:
raise Exception('Example not supported')
# solving
time_start = time.time()
df_list = []
for j in range(epsilon_range):
epsilon = 1 * 10**(-j) * (1 - zero_ep)
ep = epsilon
L2_list = []
Du_list = []
D2u_list = []
h_list = []
epu_list = []
m = MeshTri()
for i in range(1, refine_time+1):
m.refine()
if penalty:
uh0, basis, fbasis = solve_problem2(m, element_type, solver_type)
else:
uh0, basis = solve_problem1(m, element_type, solver_type)
U = basis['u'].interpolate(uh0).value
# compute errors
L2u = np.sqrt(L2uError.assemble(basis['u'], w=U))
Du = get_DuError(basis['u'], uh0)
H1u = Du + L2u
if penalty:
D2u = np.sqrt(get_D2uError(basis['u'], uh0)**2 + L2pnvError.assemble(fbasis, w=fbasis.interpolate(uh0)))
else:
D2u = get_D2uError(basis['u'], uh0)
epu = np.sqrt(epsilon**2 * D2u**2 + Du**2)
h_list.append(m.param())
Du_list.append(Du)
L2_list.append(L2u)
D2u_list.append(D2u)
epu_list.append(epu)
hs = np.array(h_list)
L2s = np.array(L2_list)
Dus = np.array(Du_list)
D2us = np.array(D2u_list)
epus = np.array(epu_list)
H1s = L2s + Dus
H2s = H1s + D2us
# store data
data = np.array([L2s, H1s, H2s, epus])
df = pd.DataFrame(data.T, columns=['L2', 'H1', 'H2', 'Energy'])
# This code is used to generate an analysis html for the results of the mturk batch of project -
# Explainable KBC (id=1365457).
# It requires the html book file (generated by @get_turk_data.py), and the results.csv downloaded from mturk.
# It then generates an analysis html file if all the HITs are valid; if not, it generates a CSV with a reason for rejecting each invalid HIT.
# Upload that CSV to MTurk to reject the HITs, withhold payment from the turkers, and republish the HITs for other workers to do.
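# Example invocation (script name and file paths are hypothetical; the flags match the argparse options defined below):
#   python analyze_results.py -rf Batch_results.csv -bf book.html -op ./analysis -st voting -thr 0.5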
import pandas as pd
import numpy as np
import pprint
import argparse
import collections
import string
import os
import bs4 as bs
import itertools
from collections import Counter
ANSWER_OPTIONS = ['our','other','both','none']
def get_key_answer(key,id):
return string.Template('Answer.${key}_${id}.on').substitute(key=key,id=id)
def get_key_input(key,id):
return string.Template('Input.${key}_${id}').substitute(key=key,id=id)
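# For example, get_key_answer('our', 0) evaluates to 'Answer.our_0.on' and get_key_input('fact', 0)
# to 'Input.fact_0', matching the column names in the MTurk results CSV.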
def valid_row(row):
total_sum = 0
quality_ctrl_id = None
for i in range(5):
if(row[get_key_input('exp_A',i)] == row[get_key_input('exp_B',i)]):
quality_ctrl_id = i
for opt in ANSWER_OPTIONS:
total_sum += row[get_key_answer(opt,i)]
if(total_sum != 5):
return 'You did not mark any option in some questions'
if(quality_ctrl_id is not None):
if(not (row[get_key_answer('both',quality_ctrl_id)] or row[get_key_answer('none',quality_ctrl_id)]) ):
print("Quality control id == >",quality_ctrl_id)
return 'You did not choose the option both explanations are good/bad, even when both A and B were the same in question number {}'.format(quality_ctrl_id+1)
return ''
def get_invalid_hits(df,outfilename):
df_new = df.copy()
df = df.fillna(False)
invalid_hits = collections.defaultdict(list)
for index,row in df.iterrows():
message = valid_row(row)
if(message!=''):
print('Invalid HIT at {} with message ==> {} '.format(index, message))
df_new['Reject'][index] = message
invalid_hits[row['WorkerId']].append(row['AssignmentId'])
else:
df_new['Approve'][index] = 'X'
if(len(invalid_hits)!=0):
df_new.to_csv(outfilename,index=False,sep=',')
return invalid_hits
def get_book(book_filename):
# TODO: Change this to have a clean pipeline
with open(book_filename,'r') as f:
soup = bs.BeautifulSoup(f, 'lxml')
table = soup.find('table')
table_body = table.find('tbody')
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text for ele in cols]
data.append([ele for ele in cols if ele])
return pd.DataFrame(data,columns=['fact','our','other'])
def get_winner_majority(answers):
count_choices = Counter(answers)
ordered_choices = count_choices.most_common()
rv = ordered_choices[0][0]
total = sum(count_choices.values())
if rv in ['both','none']:
rv = 'tie'
#
if 1.0*ordered_choices[0][1]/total < args.threshold:
rv = 'undec'
#
return [rv]
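# Illustrative case: answers = ['our', 'our', 'both'] makes 'our' the most common choice with a
# 2/3 share, so it wins if that share clears args.threshold (otherwise the result is 'undec');
# a 'both' or 'none' majority is reported as a 'tie'.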
def get_winner(answers):
if args.selection_type == 'voting':
return get_winner_voting(answers)
elif args.selection_type == 'majority':
return get_winner_majority(answers)
else:
raise
def get_winner_voting(answers):
our_ct = 0
other_ct = 0
for el in answers:
if(el=='our'):
our_ct += 1
elif(el=='other'):
other_ct += 1
elif(el=='both'):
our_ct += 1
other_ct += 1
if(our_ct > other_ct):
return ['our']
elif (other_ct > our_ct):
return ['other']
else:
return ['tie']
# return ['our','other']
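# Illustrative case: answers = ['our', 'both', 'other', 'our'] gives our_ct=3 and other_ct=2
# ('both' counts for both sides, 'none' for neither), so the winner is ['our'].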
def get_results(df,book):
df = df.fillna(False)
results = {}
for index, row in df.iterrows():
for i in range(5):
fact = row[get_key_input('fact',i)]
exp_A = row[get_key_input('exp_A',i)]
exp_B = row[get_key_input('exp_B',i)]
if(exp_A == exp_B):
continue
fact_text = bs.BeautifulSoup(fact,'lxml').text
exp_B_text = bs.BeautifulSoup(exp_B,'lxml').text
if(book[book.fact == fact_text]['our'].iloc[0] == exp_B_text):
exp_A, exp_B = exp_B , exp_A
if(fact not in results):
results[fact] = {'our_exp': exp_A, 'other_exp':exp_B, 'answers' : [],'row_idx':[], 'fact_no':[]}
for opt in ANSWER_OPTIONS:
if(row[get_key_answer(opt,i)]):
results[fact]['answers'].append(opt)
results[fact]['row_idx'].append(index)
results[fact]['fact_no'].append(i)
for k in results:
winner = get_winner(results[k]['answers'])
results[k]['winner'] = winner
return results
def write_results(results,output_file,analysis_str):
results_df = pd.DataFrame.from_dict(results,orient='index')
results_df = results_df.reset_index()
results_df = results_df.drop(['row_idx','fact_no'],axis=1)
with open('css_style.css','r') as css_file:
CSS = css_file.read()
with open(output_file,'w') as f:
f.write(CSS+'\n\n')
analysis_str = analysis_str.replace('\n','<br><br>')
f.write(analysis_str+'\n\n')
pd.set_option('display.max_colwidth', -1)
results_df.to_html(f, escape=False, justify='center')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-rf', '--result_file', help="Name of the result csv downloaded from mturk", required=True)
parser.add_argument('-op', '--output_path', help="Output path for rejected people and results", required=True)
parser.add_argument('-bf', '--book_file', help="Original HTML (Book) written by get_turk_data", required=True)
parser.add_argument('-st', '--selection_type', help="How to select the winner? Through voting or majority?",type=str, default = 'voting')
parser.add_argument('-thr', '--threshold', help="threhold for selecting winner.",type=float, default = 0)
args = parser.parse_args()
df = pd.read_csv(args.result_file)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 12:10:20 2018
@author: douglas
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 23:11:01 2018
@author: douglas
"""
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import numpy as np
import copy
from fastFM.datasets import make_user_item_regression
from fastFM import als
from sklearn.metrics import mean_squared_log_error
import scipy.sparse as sp
from sklearn.cross_validation import train_test_split
from matplotlib import pyplot as plt
# data analysis
def rmsle(y_pred, y_test) :
assert len(y_test) == len(y_pred)
return np.sqrt(np.mean((np.log(1+y_pred) - np.log(1+y_test))**2))
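# Quick sanity check (illustrative): rmsle(np.array([2.0]), np.array([1.0])) equals
# log(3) - log(2), about 0.405, and rmsle is 0 when predictions match the targets exactly.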
# load or create your dataset
print('Load data...')
df_train = pd.read_csv("train_GzS76OK/train.csv")
print(df_train.head())
df_test = pd.read_csv('test_QoiMO9B.csv')
"""
Module containing classes and function used to organize data and metadata
"""
from typing import *
import time
import pandas as pd
import sys
import pickle
from pathlib import Path
import pandas as pd
from pysmFISH.io import open_consolidated_metadata
from pysmFISH.logger_utils import selected_logger
class Dataset():
"""Dataset class used to collect all the info related to the
acquired images. It contains all the metadata needed during the
processing.
"""
def __init__(self):
self.logger = selected_logger()
def load_dataset(self, dataset_fpath: str):
"""Load a pre-existing dataset
Args:
dataset_fpath (str): Path to the existing dataset
"""
self.dataset = pd.read_parquet(dataset_fpath)
# TODO Add support for other file types
def create_full_dataset_from_files(self, experiment_fpath: str,
experiment_info: dict,
parsed_raw_data_fpath: str ,ftype: str = 'pkl'):
""" Utility function that can be used to create the dataset from a
storage type different from the zarr structure used in pysmFISH
processing. It requires a file of type ftype containing all the metadata
Args:
experiment_fpath (str): Path to the experiment to process
experiment_info (dict): Dictionary with the configuration data
parsed_raw_data_fpath (str): Path to the folder/zarr file with the parsed data
ftype (str, optional): Path to the files containing the metadata. Defaults to 'pkl'.
"""
self.experiment_fpath = Path(experiment_fpath)
self.experiment_info = experiment_info
self.parsed_raw_data_fpath = Path(parsed_raw_data_fpath)
date_tag = time.strftime("%y%m%d_%H_%M_%S")
experiment_name = self.experiment_fpath.stem
self.dataset_fpath = self.experiment_fpath / (date_tag + '_' + experiment_name + '_dataset.parquet')
self.dataset = pd.DataFrame()
all_pickle_list = list(self.parsed_raw_data_fpath.glob('*.' + ftype))
if len(all_pickle_list):
if ftype == 'pkl':
for fdata in all_pickle_list:
single = pickle.load(open(fdata,'rb'))
fdata_loc = fdata.parent / fdata.stem
single['raw_data_location'] = fdata_loc.as_posix()
single_df = pd.DataFrame(single,index=[0])
self.dataset = pd.concat([self.dataset,single_df],axis=0,ignore_index=True)
import numpy as np
import pandas as pd
import pickle
from sklearn.feature_selection import SelectKBest,f_regression
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
from sklearn import metrics
###########################
# Folder Name Setting
###########################
folder = 'J:/DATAMINING/KAGGLE/MLSP_BirdClassification/'
essential_folder = folder+'essential_data/'
supplemental_folder = folder+'supplemental_data/'
dp_folder = folder+'DP/'
subm_folder = folder+ 'Submission/'
log_folder = folder+ 'log/'
###################################################
## Read the Essential Data
## labels, training-test split,file_names etc.
###################################################
# Each audio file has a unique recording identifier ("rec_id"), ranging from 0 to 644.
# The file rec_id2filename.txt indicates which wav file is associated with each rec_id.
rec2f = pd.read_csv(essential_folder + 'rec_id2filename.txt', sep = ',')
# There are 19 bird species in the dataset. species_list.txt gives each a number from 0 to 18.
species = pd.read_csv(essential_folder + 'species_list.txt', sep = ',')
num_species = 19
# The dataset is split into training and test sets.
# CVfolds_2.txt gives the fold for each rec_id. 0 is the training set, and 1 is the test set.
cv = pd.read_csv(essential_folder + 'CVfolds_2.txt', sep = ',')
# This is your main label training data. For each rec_id, a set of species is listed. The format is:
# rec_id,[labels]
raw = pd.read_csv(essential_folder + 'rec_labels_test_hidden.txt', sep = ';')
label = np.zeros(len(raw)*num_species)
label = label.reshape([len(raw),num_species])
for i in range(len(raw)):
    line = raw.iloc[i]  # .irow() was removed from pandas; .iloc[] is the current equivalent
    labels = line[0].split(',')
    labels.pop(0)  # rec_id == i
    for c in labels:
        if c != '?':
            label[i, int(c)] = 1  # species ids arrive as strings after split(); cast before indexing
label = pd.DataFrame(label)
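# --- Hedged illustration (not part of the original script): how one raw label
# line maps onto a row of the binary matrix built above. The toy line is made
# up; numpy is already imported as np at the top of this file.
_toy_line = '12,3,7'            # rec_id 12, species 3 and 7 present
_toy_parts = _toy_line.split(',')
_toy_parts.pop(0)               # drop the rec_id, exactly as in the loop above
_toy_row = np.zeros(num_species)
for _c in _toy_parts:
    if _c != '?':
        _toy_row[int(_c)] = 1
# _toy_row now holds ones at indices 3 and 7 and zeros elsewhere.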
import datetime
import functools
import importlib
import random
import tempfile
from contextlib import contextmanager
from unittest import mock
import matplotlib
import numpy as np
import pandas as pd
import testing.postgresql
from descriptors import cachedproperty
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from triage.component.catwalk.db import ensure_db
from triage.component.catwalk.storage import MatrixStore, ProjectStorage
from triage.component.catwalk.utils import filename_friendly_hash
from triage.component.results_schema import Model, Matrix
from triage.experiments import CONFIG_VERSION
from triage.util.structs import FeatureNameList
from tests.results_tests.factories import init_engine, session, MatrixFactory
matplotlib.use("Agg")
from matplotlib import pyplot as plt # noqa
def fake_labels(length):
return np.array([random.choice([True, False]) for i in range(0, length)])
class MockTrainedModel:
def predict_proba(self, dataset):
return np.random.rand(len(dataset), len(dataset))
class MockMatrixStore(MatrixStore):
def __init__(
self,
matrix_type,
matrix_uuid,
label_count,
db_engine,
init_labels=None,
metadata_overrides=None,
matrix=None,
init_as_of_dates=None,
):
base_metadata = {
"feature_start_time": datetime.date(2014, 1, 1),
"end_time": datetime.date(2015, 1, 1),
"as_of_date_frequency": "1y",
"matrix_id": "some_matrix",
"label_name": "label",
"label_timespan": "3month",
"indices": MatrixStore.indices,
"matrix_type": matrix_type,
"as_of_times": [datetime.date(2014, 10, 1), datetime.date(2014, 7, 1)],
}
metadata_overrides = metadata_overrides or {}
base_metadata.update(metadata_overrides)
if matrix is None:
matrix = pd.DataFrame.from_dict(
{
"entity_id": [1, 2],
"as_of_date": [pd.Timestamp(2014, 10, 1), pd.Timestamp(2014, 7, 1)],
"feature_one": [3, 4],
"feature_two": [5, 6],
"label": [7, 8],
}
).set_index(MatrixStore.indices)
if init_labels is None:
init_labels = []
labels = matrix.pop("label")
self.matrix_label_tuple = matrix, labels
self.metadata = base_metadata
self.label_count = label_count
self.init_labels = pd.Series(init_labels, dtype='float64')
self.matrix_uuid = matrix_uuid
self.init_as_of_dates = init_as_of_dates or []
session = sessionmaker(db_engine)()
session.add(Matrix(matrix_uuid=matrix_uuid))
session.commit()
@property
def as_of_dates(self):
"""The list of as-of-dates in the matrix"""
return self.init_as_of_dates or self.metadata["as_of_times"]
@property
def labels(self):
if len(self.init_labels) > 0:
return self.init_labels
else:
return fake_labels(self.label_count)
def fake_trained_model(
db_engine,
train_matrix_uuid="efgh",
train_end_time=datetime.datetime(2016, 1, 1)
):
"""Creates and stores a trivial trained model and training matrix
Args:
db_engine (sqlalchemy.engine)
Returns:
(int) model id for database retrieval
"""
session = sessionmaker(db_engine)()
session.merge(Matrix(matrix_uuid=train_matrix_uuid))
# Create the fake trained model and store in db
trained_model = MockTrainedModel()
db_model = Model(
model_hash="abcd",
train_matrix_uuid=train_matrix_uuid,
train_end_time=train_end_time,
)
session.add(db_model)
session.commit()
return trained_model, db_model.model_id
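# --- Hedged usage sketch (not part of the original helpers): db_engine is assumed
# to be a SQLAlchemy engine whose results schema already exists (ensure_db,
# imported above, can create it).
def _example_fake_trained_model_usage(db_engine):
    trained, model_id = fake_trained_model(db_engine)
    # MockTrainedModel.predict_proba returns a random square array sized by its input.
    probabilities = trained.predict_proba([[0, 1], [1, 0]])
    return model_id, probabilities.shape  # -> (model_id, (2, 2))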
def matrix_metadata_creator(**override_kwargs):
"""Create a sample valid matrix metadata with optional overrides
Args:
**override_kwargs: Keys and values to override in the metadata
Returns: (dict)
"""
base_metadata = {
"feature_start_time": datetime.date(2012, 12, 20),
"end_time": datetime.date(2016, 12, 20),
"label_name": "label",
"as_of_date_frequency": "1w",
"max_training_history": "5y",
"matrix_id": "tester-1",
"state": "active",
"cohort_name": "default",
"label_timespan": "1y",
"metta-uuid": "1234",
"matrix_type": "test",
"feature_names": FeatureNameList(["ft1", "ft2"]),
"feature_groups": ["all: True"],
"indices": MatrixStore.indices,
"as_of_times": [datetime.date(2016, 12, 20)],
}
for override_key, override_value in override_kwargs.items():
base_metadata[override_key] = override_value
return base_metadata
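# --- Hedged sketch (not part of the original helpers): matrix_metadata_creator
# simply overlays keyword overrides on its defaults and leaves every other key
# untouched, as the two assertions below illustrate.
def _example_metadata_overrides():
    meta = matrix_metadata_creator(label_timespan="6month", matrix_id="demo-1")
    assert meta["label_timespan"] == "6month"  # overridden
    assert meta["matrix_type"] == "test"       # default retained
    return meta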
def matrix_creator():
"""Return a sample matrix."""
source_dict = {
"entity_id": [1, 2],
"as_of_date": [ | pd.Timestamp(2016, 1, 1) | pandas.Timestamp |
import pandas as pd
import numpy as np
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    df_train = pd.read_csv('data/titanic-train.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 11:24:34 2019
@author: nzhang
"""
import pandas as pd
#import modin.pandas as pd
import codecs
import re
from bs4 import BeautifulSoup
import multiprocessing
import numba
def read_snapshot(file_path):
'''
read snapshot files
@params:
file_path: store path of snapshot
@return:
DataFrame
'''
data = {}
data['unicode'] = []
data['file_path'] = []
try:
for file in file_path:
with codecs.open(file, "r", "utf-8") as f:
data['unicode'].append(f.read())
data['file_path'].append(file)
except FileNotFoundError:
print('{} is not found'.format(file))
    return pd.DataFrame.from_dict(data)
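# --- Hedged usage sketch (not part of the original module): the file names below
# are hypothetical. read_snapshot returns a DataFrame with one row per snapshot
# file and the columns 'unicode' (page text) and 'file_path'.
def _example_read_snapshot():
    snapshots = read_snapshot(['snapshots/page_0001.html', 'snapshots/page_0002.html'])
    return snapshots[['file_path', 'unicode']].head()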
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
            # Daily timestamps from 2012-05-02 through 2013-04-04 (indices
            # 0-337), generated compactly instead of 338 literal entries.
            **{
                i: ts
                for i, ts in enumerate(
                    pd.date_range(start="2012-05-02", periods=338, freq="D")
                )
            },
            # Assumed continuation of the daily sequence (the original entry was cut off).
            338: pd.Timestamp("2013-04-05 00:00:00"),
import json
import os
import csv
import socket
import pandas as pd
import numpy as np
import glob
import logging
from datetime import datetime, timedelta
from flask import flash, current_app
from flask_login import current_user
from pathlib import Path
from specter_importer import Specter
from pricing_engine.engine import (fx_rate,
price_ondate, fx_price_ondate, realtime_price,
historical_prices)
from pricing_engine.cryptocompare import multiple_price_grab
from warden_decorators import MWT, timing
from utils import load_config
from dateutil import parser
from parseNumbers import parseNumber
# Returns the current application path
def current_path():
application_path = os.path.dirname(os.path.abspath(__file__))
return (application_path)
# Returns the home path
def home_path():
home = str(Path.home())
return (home)
# ------------------------------------
# Address and Port checker - to check
# which services are running
def check_server(address, port, timeout=10):
# Create a TCP socket
s = socket.socket()
s.settimeout(timeout)
try:
s.connect((address, port))
return True
except socket.error:
return False
finally:
s.close()
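# Illustrative sketch (not part of WARden itself): probe a local service port.
# Port 8332 is the default Bitcoin Core RPC port and is only an assumption
# made for this example.
def _example_check_server():
    return check_server("127.0.0.1", 8332, timeout=2)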
# End Config Variables ------------------------------------------------
# Get all transactions of specific wallet by using alias
def get_specter_tx(wallet_alias, sort_by='time', idx=0, load=True, session=None):
df = pd.DataFrame()
wallet_list = current_app.specter.wallet_alias_list()
if wallet_alias not in wallet_list:
logging.error(f"Wallet {wallet_alias}: Wallet not in current_app")
return (df)
t = current_app.specter.refresh_txs(load=True)
    df = pd.concat([df, pd.DataFrame(t)])  # DataFrame.append was removed in pandas 2.0
logging.info(f"Wallet {wallet_alias} --- Finished txs")
# Sort df
if not df.empty:
df = df.sort_values(by=[sort_by], ascending=False)
return (df)
# This returns data to create the Warden Status Page
def warden_metadata():
from utils import pickle_it
meta = {}
meta['full_df'] = specter_df()
meta['wallet_list'] = current_app.specter.wallet_alias_list()
# Load pickle with previous checkpoint df
df_pkl = 'txs_pf.pkl'
data = pickle_it(action='load', filename=df_pkl)
    if isinstance(data, pd.DataFrame):
        meta['df_old'] = data
    else:
        # pickle_it returns the string 'file not found' when no checkpoint exists
        meta['df_old'] = None
# load difference / changes in addresses from file
ack_file = 'txs_diff.pkl'
data = pickle_it(action='load', filename=ack_file)
    if data == 'file not found':
        meta['ack_file'] = None
        meta['old_new_df_old'] = None
        meta['old_new_df_new'] = None
    else:
        meta['ack_file'] = data
        meta['old_new_df_old'] = data['deleted']
        meta['old_new_df_new'] = data['added']
return (meta)
# Transactions Engine --------------------------------------
class Trades():
def __init__(self):
self.id = None
self.user_id = "specter_user"
self.trade_inputon = None
self.trade_date = None
self.trade_currency = current_app.settings['PORTFOLIO']['base_fx']
self.trade_asset_ticker = None
self.trade_account = None
self.trade_quantity = None
self.trade_operation = None
self.trade_price = None
self.trade_fees = None
self.trade_notes = None
self.trade_reference_id = None
self.trade_blockchain_id = None
self.cash_value = None
def to_dict(self):
return (vars(self))
def specter_df(delete_files=False, sort_by='trade_date'):
from utils import pickle_it
df = pd.DataFrame()
    try:
        t = current_app.specter.refresh_txs(load=True)['txlist']
        df = pd.concat([df, pd.DataFrame(t)])
    except Exception as e:
        print(e)
        # Refresh failed - return whatever was collected (an empty frame)
        return df
    # Check if any txs exist
    if df.empty:
        return df
# Clean Date String
df['trade_date'] = pd.to_datetime(df['time'], unit='s')
# Add additional columns
if 'fee' not in df:
df['fee'] = 0
df['trade_blockchain_id'] = df['txid']
df['trade_account'] = df['wallet_alias']
df['trade_currency'] = current_app.settings['PORTFOLIO']['base_fx']
df['trade_asset_ticker'] = "BTC"
portfolio_divisor = current_app.settings['PORTFOLIO'].getfloat('divisor')
if portfolio_divisor is None:
portfolio_divisor = 1
df['amount'] = df['amount'].apply(parseNumber)
try:
df['amount'] = df['amount'] / portfolio_divisor
except TypeError:
pass
df['trade_quantity'] = df['amount']
df['trade_notes'] = 'Imported from Specter Wallet'
df['trade_reference_id'] = ""
def trade_operation(value):
# Get Bitcoin price on each Date
try:
if value.lower() == 'receive':
return ("B")
if value.lower() == 'send':
return ("S")
except Exception:
return ("")
df['trade_operation'] = df['category'].apply(trade_operation)
df['date_str'] = df['trade_date'].dt.strftime('%Y-%m-%d')
def btc_price(date_input):
get_date = datetime.strptime(date_input, "%Y-%m-%d")
# Create price object
try:
fx = fx_price_ondate('USD', current_app.fx['code'], get_date)
price = price_ondate("BTC", get_date)['close'] * fx
except Exception as e:
logging.error("Not Found. Error: " + str(e))
price = 0
return (price)
df['btc_price'] = df['date_str'].apply(btc_price)
df['trade_price'] = df['btc_price']
# For some reason Specter is returning fee = 1 for some transactions
# So the filter below clears all fees higher than 0.10 BTC which is
# probably too high :)
df.loc[df.fee > 0.10, 'fee'] = 0
df['fee'] = df['fee'].fillna(0)
df['trade_fees'] = df['fee'] * df['btc_price']
df['trade_multiplier'] = 0
df.loc[df.trade_operation == 'B', 'trade_multiplier'] = 1
df.loc[df.trade_operation == 'receive', 'trade_multiplier'] = 1
df.loc[df.trade_operation == 'S', 'trade_multiplier'] = -1
df.loc[df.trade_operation == 'send', 'trade_multiplier'] = -1
df['trade_quantity'] = df['trade_quantity'] * df['trade_multiplier']
df['amount'] = df['trade_quantity']
try:
df['cash_value'] = abs(df['trade_price']) * abs(df['trade_quantity']) * df[
'trade_multiplier']
except Exception:
df['cash_value'] = 0
df['loaded'] = False
# TEST LINE ------------- Make this a new transaction forced into df
tester = {
'trade_date': datetime.now(),
'trade_currency': 'USD',
'trade_fees': 0,
'trade_quantity': 1,
'trade_multiplier': 1,
'trade_price': 10000,
'trade_asset_ticker': 'BTC',
'trade_operation': 'B',
'checksum': (5 * (10**19)),
'txid': 'test',
'address': 'test_address',
'amount': 2,
'status': 'Test_line',
'trade_account': 'trezor',
'loaded': False,
'trade_blockchain_id': 'xxsxmssxkxsjsxkxsx'
}
# Comment / Uncomment code below for testing of including new transactions
# Remove last 2 transactions here
# df.drop(df.tail(2).index, inplace=True)
# add transaction above
# df = df.append(tester, ignore_index=True)
# END TEST LINE ----------------------------------------------------
# Files ----------------------------------
df_pkl = 'txs_pf.pkl'
old_df_file = 'old_df.pkl'
ack_file = 'txs_diff.pkl'
# -----------------------------------------
# Activity checkpoint will be created. Delete all old files.
if delete_files:
pickle_it(action='delete', filename=df_pkl)
pickle_it(action='delete', filename=old_df_file)
pickle_it(action='delete', filename=ack_file)
# save this latest df to a file
pickle_it(action='save', filename=df_pkl, data=df)
try:
# Loads the old df to check for activity
df_loaded = pickle_it(action='load', filename=old_df_file)
if not isinstance(df_loaded, pd.DataFrame):
if df_loaded == "file not found":
raise FileNotFoundError
df_loaded['loaded'] = True
# Find differences in old vs. new
df_check = pd.concat([df, df_loaded]).drop_duplicates(
subset='trade_blockchain_id', keep=False)
if not df_check.empty:
# Let's find which checksums are different and compile a list - save this list
# so it can be used on main page to highlight changes
df_old = df_check[df_check['loaded']]
df_new = df_check[~df_check['loaded']]
json_save = {
'changes_detected_on': datetime.now().strftime("%I:%M %p on %B %d, %Y"),
'deleted': df_old,
'added': df_new
}
# If activity is detected, don't delete the old df by saving new df over
save_files = False
else:
json_save = {
'changes_detected_on': None,
'deleted': None,
'added': None
}
save_files = True
# Save the dict above to be accessed later
pickle_it(action='save', filename=ack_file, data=json_save)
except FileNotFoundError:
# Files not found - let's save a new checkpoint
save_files = True
# Sort
df = df.sort_values(by=[sort_by], ascending=False)
if save_files:
pickle_it(action='save', filename=old_df_file, data=df)
return (df)
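# Illustrative sketch of the checkpoint-diff technique used above, with
# synthetic rows instead of Specter transactions: concatenating the old and
# new frames and dropping every duplicated id (keep=False) leaves only rows
# that were added or deleted between the two snapshots.
def _example_checkpoint_diff():
    old = pd.DataFrame({
        'trade_blockchain_id': ['tx1', 'tx2', 'tx3'],
        'loaded': True,
    })
    new = pd.DataFrame({
        'trade_blockchain_id': ['tx2', 'tx3', 'tx4'],
        'loaded': False,
    })
    diff = pd.concat([new, old]).drop_duplicates(
        subset='trade_blockchain_id', keep=False)
    deleted = diff[diff['loaded']]   # tx1 - only in the old snapshot
    added = diff[~diff['loaded']]    # tx4 - only in the new snapshot
    return deleted, added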
def find_fx(row, fx=None):
# row.name is the date being passed
# row['trade_currency'] is the base fx (the one where the trade was included)
# Create an instance of PriceData:
price = fx_price_ondate(
current_app.settings['PORTFOLIO']['base_fx'], row['trade_currency'], row.name)
return price
@ MWT(timeout=20)
def transactions_fx():
# Gets the transaction table and fills with fx information
# Note that it uses the currency exchange for the date of transaction
# Get all transactions from Specter and format
# SPECTER ============================================
df = specter_df()
if not df.empty:
df['trade_date'] = pd.to_datetime(df['trade_date'])
df = df.set_index('trade_date')
# Ignore times in df to merge - keep only dates
df.index = df.index.floor('d')
df.index.rename('date', inplace=True)
# SQL DATABASE ========================================
# Get all transactions from db and format
df_sql = pd.read_sql_table('trades', current_app.db.engine)
if not df_sql.empty:
df_sql = df_sql[(df_sql.user_id == current_user.username)]
# df = df[(df.trade_operation == "B") | (df.trade_operation == "S")]
df_sql['trade_date'] = pd.to_datetime(df_sql['trade_date'])
df_sql = df_sql.set_index('trade_date')
# Ignore times in df to merge - keep only dates
df_sql.index = df_sql.index.floor('d')
df_sql.index.rename('date', inplace=True)
# Merge both
    df = pd.concat([df, df_sql], sort=False)  # DataFrame.append was removed in pandas 2.0
if df.empty:
logging.warning("Transactions_FX - No txs found")
return df
# The current fx needs no conversion, set to 1
df[fx_rate()['fx_rate']] = 1
# Need to get currencies into the df in order to normalize
# let's load a list of currencies needed and merge
list_of_fx = df.trade_currency.unique().tolist()
# loop through currency list
for currency in list_of_fx:
if currency == fx_rate()['fx_rate']:
continue
# Make a price request
df[currency] = df.apply(find_fx, axis=1)
# Now create a cash value in the preferred currency terms
df['fx'] = df.apply(lambda x: x[x['trade_currency']], axis=1)
df['cash_value_fx'] = df['cash_value'].astype(float) / df['fx'].astype(
float)
df['trade_fees_fx'] = df['trade_fees'].astype(float) / df['fx'].astype(
float)
df['trade_price_fx'] = df['trade_price'].astype(float) / df['fx'].astype(
float)
if 'trade_date' not in df.columns:
df['trade_date'] = pd.to_datetime(df.index)
return (df)
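# Illustrative sketch of the per-row FX normalisation performed above, using
# hard-coded rates instead of fx_price_ondate: each trade's cash value is
# divided by the rate of its own trade currency.
def _example_fx_normalisation():
    df = pd.DataFrame({
        'trade_currency': ['USD', 'EUR', 'USD'],
        'cash_value': [100.0, 50.0, 25.0],
        'USD': [1.0, 1.0, 1.0],
        'EUR': [0.9, 0.9, 0.9],
    })
    # Pick, for every row, the rate of the currency the trade was made in
    df['fx'] = df.apply(lambda x: x[x['trade_currency']], axis=1)
    df['cash_value_fx'] = df['cash_value'].astype(float) / df['fx'].astype(float)
    return df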
# UTILS -----------------------------------
# Better to use parseNumber most of the times...
# Function to clean CSV fields - leave only digits and .
def clean_float(text):
if isinstance(text, int):
return (float(text))
if isinstance(text, float):
return (text)
if text is None:
return (0)
acceptable = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "."]
    cleaned = ""
    for char in text:
        if char in acceptable:
            cleaned += char
    if cleaned == '':
        return (0)
    return (float(cleaned))
def cleandate(text): # Function to clean Date fields
if text is None:
return (None)
text = str(text)
acceptable = [
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "/", "-", ":",
" "
]
str_parse = ""
for char in text:
if char in acceptable:
char = '-' if (char == '.' or char == '/') else char
str_parse = str_parse + char
from dateutil import parser
str_parse = parser.parse(str_parse, dayfirst=True)
return (str_parse)
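# Quick usage notes for the two helpers above (illustrative values only):
# clean_float keeps digits and dots, cleandate normalises separators and
# parses day-first dates.
def _example_cleaning_helpers():
    assert clean_float("1,234.56 BTC") == 1234.56
    assert clean_float(None) == 0
    assert cleandate("25/12/2020") == datetime(2020, 12, 25)
    return True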
# PORTFOLIO UTILITIES
def positions():
# Method to create a user's position table
# Returns a df with the following information
# Ticker, name, quantity, small_pos
# THIS SHOULD CONTAIN THE STATIC FIELDS ONLY - no web requests
# It should be a light method to load quickly on the main page.
# Anything with web requests should be done on a separate function
# Get all transactions & group by ticker name and operation
df = transactions_fx()
if df.empty:
logging.warning("No Transactions Found")
return df
summary_table = df.groupby(['trade_asset_ticker', 'trade_operation'])[[
"trade_quantity", "cash_value_fx", "trade_fees_fx"
]].sum()
# Now let's create our main dataframe with information for each ticker
list_of_tickers = df['trade_asset_ticker'].unique().tolist()
main_df = pd.DataFrame({'trade_asset_ticker': list_of_tickers})
# Fill with positions, cash_values and fees
df_tmp = df.groupby(['trade_asset_ticker'])[[
"trade_quantity", "cash_value_fx", "trade_fees_fx"
]].sum()
main_df = pd.merge(main_df, df_tmp, on='trade_asset_ticker')
# Fill in with same information but only for buys, sells, deposits and withdraws
# main_df = pd.merge(main_df, summary_table, on='trade_asset_ticker')
summary_table = summary_table.unstack(level='trade_operation').fillna(0)
main_df = pd.merge(main_df, summary_table, on='trade_asset_ticker')
# Include FIFO and LIFO calculations for each ticker
main_df['cost_frame'] = main_df['trade_asset_ticker'].apply(
cost_calculation)
# Unpack this into multiple columns now
main_df = df_unpack(main_df, 'cost_frame', 0)
main_df = df_unpack(main_df, 'FIFO', 0)
main_df = df_unpack(main_df, 'LIFO', 0)
main_df['is_currency'] = main_df['trade_asset_ticker'].apply(is_currency)
return main_df
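# Illustrative sketch of the groupby/unstack step used in positions() above,
# with a synthetic trade list: summing per (ticker, operation) and unstacking
# yields one column per operation (e.g. buys vs sells).
def _example_positions_summary():
    trades = pd.DataFrame({
        'trade_asset_ticker': ['BTC', 'BTC', 'ETH'],
        'trade_operation': ['B', 'S', 'B'],
        'trade_quantity': [1.0, -0.4, 10.0],
    })
    summary = trades.groupby(
        ['trade_asset_ticker', 'trade_operation'])[['trade_quantity']].sum()
    # Columns become ('trade_quantity', 'B') and ('trade_quantity', 'S')
    return summary.unstack(level='trade_operation').fillna(0)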
def single_price(ticker):
return (realtime_price(ticker)['price'], datetime.now())
@ MWT(timeout=200)
def list_tickers():
df = transactions_fx()
# Now let's create our main dataframe with information for each ticker
list_of_tickers = df['trade_asset_ticker'].unique().tolist()
list_of_tickers = [ticker.upper() for ticker in list_of_tickers]
return (list_of_tickers)
def positions_dynamic():
fx = load_config()['PORTFOLIO']['base_fx']
# This method is the realtime updater for the front page. It gets the
# position information from positions above and returns a dataframe
# with all the realtime pricing and positions data - this method
# should be called from an AJAX request at the front page in order
# to reduce loading time.
df = positions()
# Drop all currencies from table
df = df[df['is_currency'] == False]
# check if trade_asset_ticker is set as index. If so, move to column
df = df.reset_index()
if df is None:
return None, None
tickers_string = ",".join(list_tickers())
# Make sure the Bitcoin price is retrieved even if not in portfolio
if ('BTC' not in tickers_string) and (tickers_string is not None):
tickers_string += ',BTC'
# Let's try to get as many prices as possible into the df with a
# single request - first get all the prices in current currency and USD
multi_price = multiple_price_grab(tickers_string, 'USD,' + fx)
    # PARSER function to find the ticker price inside the matrix. The first part
# looks into the cryptocompare matrix. In the exception, if price is not
# found, it sends a request to other providers
btc_price = None
def find_data(ticker):
notes = None
last_up_source = None
source = None
try:
# Parse the cryptocompare data
price = multi_price["RAW"][ticker][fx]["PRICE"]
# GBTC should not be requested from multi_price as there is a
# coin with same ticker
if ticker in ['GBTC', 'MSTR', 'TSLA', 'SQ']:
raise KeyError
price = float(price)
high = float(multi_price["RAW"][ticker][
fx]["HIGHDAY"])
low = float(multi_price["RAW"][ticker][
fx]["LOWDAY"])
chg = multi_price["RAW"][ticker][fx]["CHANGEPCT24HOUR"]
mktcap = multi_price["DISPLAY"][ticker][fx]["MKTCAP"]
volume = multi_price["DISPLAY"][ticker][fx]["VOLUME24HOURTO"]
last_up_source = multi_price["RAW"][ticker][fx]["LASTUPDATE"]
source = multi_price["DISPLAY"][ticker][fx]["LASTMARKET"]
last_update = datetime.now()
except (KeyError, TypeError):
# Couldn't find price with CryptoCompare. Let's try a different source
# and populate data in the same format [aa = alphavantage]
try:
single_price = realtime_price(ticker)
if single_price is None:
raise KeyError
price = clean_float(single_price['price'])
last_up_source = last_update = single_price['time']
try:
chg = parseNumber(single_price['chg'])
except Exception:
chg = 0
try:
source = last_up_source = single_price['source']
except Exception:
source = last_up_source = '-'
try:
high = single_price['high']
low = single_price['low']
mktcap = volume = '-'
except Exception:
mktcap = high = low = volume = '-'
except Exception:
try:
# Finally, if realtime price is unavailable, find the latest
# saved value in historical prices
# Create a price class
price_class = historical_prices(ticker, fx)
if price_class is None:
raise KeyError
price = clean_float(
price_class.df['close_converted'].iloc[0])
high = '-'
low = '-'
volume = '-'
mktcap = chg = 0
source = last_up_source = 'Historical Data'
last_update = price_class.df.index[0]
except Exception as e:
price = high = low = chg = mktcap = last_up_source = last_update = volume = 0
source = '-'
logging.error(f"There was an error getting the price for {ticker}." +
f"Error: {e}")
if ticker.upper() == 'BTC':
nonlocal btc_price
btc_price = price
# check if 24hr change is indeed 24h or data is old, if so 24hr change = 0
try:
checker = last_update
if not isinstance(checker, datetime):
checker = parser.parse(last_update)
if checker < (datetime.now() - timedelta(days=1)):
chg = 0
except Exception:
pass
return price, last_update, high, low, chg, mktcap, last_up_source, volume, source, notes
df = apply_and_concat(df, 'trade_asset_ticker', find_data, [
'price', 'last_update', '24h_high', '24h_low', '24h_change', 'mktcap',
'last_up_source', 'volume', 'source', 'notes'
])
# Now create additional columns with calculations
df['position_fx'] = df['price'] * df['trade_quantity']
df['position_btc'] = df['price'] * df['trade_quantity'] / btc_price
# Force some fields to float and clean
float_fields = ['price', '24h_high', '24h_low',
'24h_change', 'mktcap', 'volume']
for field in float_fields:
df[field] = df[field].apply(clean_float)
df['allocation'] = df['position_fx'] / df['position_fx'].sum()
df['change_fx'] = df['position_fx'] * df['24h_change'].astype(float) / 100
# Pnl and Cost calculations
df['breakeven'] = (df['cash_value_fx'] +
df['trade_fees_fx']) / df['trade_quantity']
df['pnl_gross'] = df['position_fx'] - df['cash_value_fx']
df['pnl_net'] = df['pnl_gross'] - df['trade_fees_fx']
# FIFO and LIFO PnL calculations
df['LIFO_unreal'] = (df['price'] - df['LIFO_average_cost']) * \
df['trade_quantity']
df['FIFO_unreal'] = (df['price'] - df['FIFO_average_cost']) * \
df['trade_quantity']
df['LIFO_real'] = df['pnl_net'] - df['LIFO_unreal']
df['FIFO_real'] = df['pnl_net'] - df['FIFO_unreal']
df['LIFO_unrealized_be'] = df['price'] - \
(df['LIFO_unreal'] / df['trade_quantity'])
df['FIFO_unrealized_be'] = df['price'] - \
(df['FIFO_unreal'] / df['trade_quantity'])
# Allocations below 0.01% are marked as small
# this is used to hide small and closed positions at html
df['small_pos'] = 'False'
# df.loc[df.allocation <= 0, 'small_pos'] = 'True'
# df.loc[df.allocation >= 0, 'small_pos'] = 'False'
# Prepare for delivery. Change index, add total
df.set_index('trade_asset_ticker', inplace=True)
df.loc['Total'] = 0
# Column names can't be tuples - otherwise json generates an error
df.rename(columns={
('trade_quantity', 'B'): 'trade_quantity_B',
('trade_quantity', 'S'): 'trade_quantity_S',
('trade_quantity', 'D'): 'trade_quantity_D',
('trade_quantity', 'W'): 'trade_quantity_W',
('cash_value_fx', 'B'): 'cash_value_fx_B',
('cash_value_fx', 'S'): 'cash_value_fx_S',
('cash_value_fx', 'D'): 'cash_value_fx_D',
('cash_value_fx', 'W'): 'cash_value_fx_W',
('trade_fees_fx', 'B'): 'trade_fees_fx_B',
('trade_fees_fx', 'S'): 'trade_fees_fx_S',
('trade_fees_fx', 'D'): 'trade_fees_fx_D',
('trade_fees_fx', 'W'): 'trade_fees_fx_W'
},
inplace=True)
# Need to add only some fields - strings can't be added for example
columns_sum = [
'cash_value_fx', 'trade_fees_fx', 'position_fx', 'allocation',
'change_fx', 'pnl_gross', 'pnl_net', 'LIFO_unreal', 'FIFO_unreal',
'LIFO_real', 'FIFO_real', 'position_btc'
]
for field in columns_sum:
df.loc['Total', field] = df[field].sum()
# Set the portfolio last update to be equal to the latest update in df
df.loc['Total', 'last_up_source'] = (
datetime.now()).strftime('%d-%b-%Y %H:%M:%S')
df['last_update'] = df['last_update'].astype(str)
# Create a pie chart data in HighCharts format excluding small pos
pie_data = []
for ticker in list_tickers():
if df.loc[ticker, 'small_pos'] == 'False':
tmp_dict = {}
tmp_dict['y'] = round(df.loc[ticker, 'allocation'] * 100, 2)
tmp_dict['name'] = ticker
pie_data.append(tmp_dict)
return (df, pie_data)
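# Illustrative sketch of the Highcharts pie payload built at the end of
# positions_dynamic(), fed from a plain allocation Series instead of the
# live portfolio.
def _example_pie_data():
    allocation = pd.Series({'BTC': 0.8, 'ETH': 0.2})
    pie_data = []
    for ticker, weight in allocation.items():
        pie_data.append({'name': ticker, 'y': round(weight * 100, 2)})
    return pie_data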
@ MWT(timeout=10)
def generatenav(user=None, force=False, filter=None):
if not user:
user = current_user.username
PORTFOLIO_MIN_SIZE_NAV = 1
RENEW_NAV = 10
FX = current_app.settings['PORTFOLIO']['base_fx']
# Portfolios smaller than this size do not account for NAV calculations
# Otherwise, there's an impact of dust left in the portfolio (in USD)
# This is set in config.ini file
min_size_for_calc = int(PORTFOLIO_MIN_SIZE_NAV)
save_nav = True
# This process can take some time and it's intensive to run NAV
# generation every time the NAV is needed. A compromise is to save
# the last NAV generation locally and only refresh after a period of time.
# This period of time is setup in config.ini as RENEW_NAV (in minutes).
# If last file is newer than 60 minutes (default), the local saved file
# will be used.
# Unless force is true, then a rebuild is done regardless
# Local files are saved under a hash of username.
filename = "warden/" + user + FX + ".nav"
filename = os.path.join(home_path(), filename)
if force:
# Since this function can be run as a thread, it's safer to delete
# the current NAV file if it exists. This avoids other tasks reading
# the local file which is outdated
try:
os.remove(filename)
except Exception:
pass
if not force:
try:
# Check if NAV saved file is recent enough to be used
# Local file has to have a saved time less than RENEW_NAV min old
modified = datetime.utcfromtimestamp(os.path.getmtime(filename))
elapsed_seconds = (datetime.utcnow() - modified).total_seconds()
if (elapsed_seconds / 60) < int(RENEW_NAV):
                # Assumed completion (the original line was cut off): reuse the
                # recently saved NAV table instead of rebuilding it.
                nav_pickle = pd.read_pickle(filename)
                return nav_pickle
import pandas as pd
import sys
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District Of Columbia': 'DC',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
def get_state_fips():
# Source: US census
# Link: www.census.gov/geographies/reference-files/2017/demo/popest/2017-fips.html
# File: 2017 State, County, Minor Civil Division, and Incorporated Place FIPS Codes
# Note: .xslx file header was removed and sheet was exported to csv
fips_data = pd.read_csv("data/geodata/all-geocodes-v2017.csv",encoding = "ISO-8859-1", dtype={'State Code (FIPS)': str, 'County Code (FIPS)': str})
# Map 040 level fips code to state name in dictionary
state_data = fips_data[fips_data['Summary Level'] == 40].copy(deep=True)
state_data['state_abbrev'] = state_data['Area Name (including legal/statistical area description)'].apply(lambda x : us_state_abbrev[x])
state_map = pd.Series(state_data['State Code (FIPS)'].values,index=state_data['state_abbrev']).to_dict()
state_map['AS'] = "60"
state_map['GU'] = "66"
state_map['MP'] = "69"
state_map['PR'] = "72"
state_map['VI'] = "78"
# Get all county fips codes
fips_data = fips_data[fips_data['Summary Level'] == 50]
fips_data.insert(0, 'FIPS', fips_data['State Code (FIPS)'] + fips_data['County Code (FIPS)'])
fips_data = fips_data[['FIPS', 'State Code (FIPS)']]
return state_map, fips_data
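# Illustrative sketch of the Series-to-dict mapping trick used in
# get_state_fips(), with a two-row frame instead of the census file.
def _example_state_map():
    state_data = pd.DataFrame({
        'state_abbrev': ['AL', 'AK'],
        'State Code (FIPS)': ['01', '02'],
    })
    state_map = pd.Series(
        state_data['State Code (FIPS)'].values,
        index=state_data['state_abbrev']).to_dict()
    return state_map  # {'AL': '01', 'AK': '02'}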
def preprocess_testing(after_mar_the_seventh = True):
print("• Processing COVID Tracking Project Testing Data")
state_map, fips_data = get_state_fips()
# State testing data obtained from the COVID Tracking Project (www.covidtracking.com)
# COVID Tracking Project stopped updating since Mar 7th, 2021
# API is last accessed on 2021-05-11
# Original API: https://covidtracking.com/api/v1/states/daily.csv
testing = pd.read_csv("data/COVIDTracking/covidtracking_2021_03_07.csv", usecols = ['date', 'state', 'totalTestResultsIncrease', 'positiveIncrease'], dtype = {'date':str})
testing['state'] = testing['state'].apply(lambda x : state_map[x])
testing['date'] = pd.to_datetime(testing['date'])
testing = testing.sort_values(['state','date']).reset_index(drop=True)
if after_mar_the_seventh:
new_testing = pd.read_csv("https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/testing_data/time_series_covid19_US.csv", usecols=['date', 'state', 'tests_combined_total', 'cases_conf_probable'], dtype={'date':str})
new_testing = new_testing.rename(columns = {'cases_conf_probable': 'positiveIncrease', 'tests_combined_total':'totalTestResultsIncrease'})
new_testing['date'] = pd.to_datetime(new_testing['date'])
new_testing['state'] = new_testing['state'].apply(lambda x : state_map[x])
new_testing = new_testing.sort_values(['state','date']).reset_index(drop=True)
new_testing['positiveIncrease'] = new_testing.groupby("state")['positiveIncrease'].diff()
new_testing['totalTestResultsIncrease'] = new_testing.groupby("state")['totalTestResultsIncrease'].diff()
new_testing = new_testing.dropna()
new_testing[['positiveIncrease', 'totalTestResultsIncrease']] = new_testing[['positiveIncrease', 'totalTestResultsIncrease']].astype(int)
new_testing = new_testing[new_testing['date'] > "2021-03-07"].reset_index(drop=True)
        # Assumed completion (the original line was cut off): append the
        # post-2021-03-07 JHU series to the COVID Tracking Project data.
        testing = pd.concat([testing, new_testing], axis=0).reset_index(drop=True)
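# Illustrative sketch of the cumulative-to-daily conversion applied to the
# JHU series above: within each state, diff() turns running totals into
# daily increases and the first (NaN) row per state is dropped.
def _example_daily_increase():
    df = pd.DataFrame({
        'state': ['01', '01', '01', '02', '02'],
        'tests_combined_total': [10, 15, 22, 5, 9],
    })
    df['totalTestResultsIncrease'] = df.groupby('state')['tests_combined_total'].diff()
    return df.dropna()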
# coding: utf-8
import numpy as np
import pandas as pd
import click
import json
# Shared data for the picture set and the model
global dataFram
# dataFram = pd.read_excel("./picData.xlsx", sheet_name="sheet1")
'''Tested and passing.'''
'''Create a DataFrame from an Excel spreadsheet.'''
@click.command()
@click.option('--datasrc', default='./picData.xlsx', help='the class of data')
def createDataFrameFromExcel(datasrc):
    dataFram = pd.read_excel(datasrc, sheet_name="sheet1")
#print(dataFram)
#print(dataGram.describe())
saveDataFramToExcel(dataFram, "moduleDataNew.xlsx", "result")
pass
'''For testing purposes only.'''
def createDataFramFromJson(jsonstr):
data = json.loads(jsonstr)
    # Assumed completion (the original line was cut off): build the frame
    # straight from the parsed JSON records.
    dataFram = pd.DataFrame(data)
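# Illustrative sketch (assumed input shape): building a DataFrame from a JSON
# string holding a list of records, as createDataFramFromJson does above.
def _example_dataframe_from_json():
    jsonstr = '[{"Name": "dish", "Probability": 0.9}]'
    records = json.loads(jsonstr)
    return pd.DataFrame(records)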
"""
ScmRun provides a high level analysis tool for simple climate model relevant
data. It provides a simple interface for reading/writing, subsetting and visualising
model data. ScmRuns are able to hold multiple model runs which aids in analysis of
ensembles of model runs.
"""
import copy
import datetime as dt
import numbers
import os
import warnings
from logging import getLogger
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import cftime
import numpy as np
import numpy.testing as npt
import pandas as pd
import pint
from dateutil import parser
from openscm_units import unit_registry as ur
from ._base import OpsMixin
from ._xarray import inject_xarray_methods
from .errors import (
DuplicateTimesError,
MissingRequiredColumnError,
NonUniqueMetadataError,
)
from .filters import (
HIERARCHY_SEPARATOR,
datetime_match,
day_match,
hour_match,
month_match,
pattern_match,
years_match,
)
from .groupby import RunGroupBy
from .netcdf import inject_nc_methods
from .offsets import generate_range, to_offset
from .ops import inject_ops_methods
from .plotting import inject_plotting_methods
from .pyam_compat import IamDataFrame, LongDatetimeIamDataFrame
from .time import _TARGET_DTYPE, TimePoints, TimeseriesConverter
from .units import UnitConverter
_logger = getLogger(__name__)
MetadataType = Dict[str, Union[str, int, float]]
ApplyCallable = Callable[[pd.DataFrame], Union[pd.DataFrame, pd.Series, float]]
def _read_file( # pylint: disable=missing-return-doc
fnames: str, required_cols: Tuple[str], *args: Any, **kwargs: Any
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Prepare data to initialize :class:`ScmRun <scmdata.run.ScmRun>` from a file.
Parameters
----------
*args
Passed to :func:`_read_pandas`.
**kwargs
Passed to :func:`_read_pandas`.
Returns
-------
:class:`pandas.DataFrame`, :class:`pandas.DataFrame`
First dataframe is the data. Second dataframe is metadata
"""
_logger.info("Reading %s", fnames)
return _format_data(_read_pandas(fnames, *args, **kwargs), required_cols)
def _read_pandas(
fname: str, *args: Any, lowercase_cols=False, **kwargs: Any
) -> pd.DataFrame:
"""
Read a file and return a :class:`pandas.DataFrame`.
Parameters
----------
fname
Path from which to read data
lowercase_cols
If True, convert the column names of the file to lowercase
*args
Passed to :func:`pandas.read_excel` if :obj:`fname` ends with '.xls' or
        '.xlsx', otherwise passed to :func:`pandas.read_csv`.
**kwargs
Passed to :func:`pandas.read_excel` if :obj:`fname` ends with '.xls' or
        '.xlsx', otherwise passed to :func:`pandas.read_csv`.
Returns
-------
:class:`pandas.DataFrame`
Read data
Raises
------
OSError
Path specified by :obj:`fname` does not exist
"""
if not os.path.exists(fname):
raise OSError("no data file `{}` found!".format(fname))
if fname.endswith("xlsx") or fname.endswith("xls"):
_logger.debug("Assuming excel file")
xl = pd.ExcelFile(fname)
if len(xl.sheet_names) > 1 and "sheet_name" not in kwargs:
kwargs["sheet_name"] = "data"
df = pd.read_excel(fname, *args, **kwargs)
else:
_logger.debug("Reading with pandas read_csv")
df = pd.read_csv(fname, *args, **kwargs)
def _to_lower(c):
if hasattr(c, "lower"):
return c.lower()
return c
if lowercase_cols:
df.columns = [_to_lower(c) for c in df.columns]
return df
# pylint doesn't recognise return statements if they include ','
def _format_data( # pylint: disable=missing-return-doc
df: Union[pd.DataFrame, pd.Series], required_cols: Tuple[str]
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Prepare data to initialize :class:`ScmRun <scmdata.run.ScmRun>` from :class:`pandas.DataFrame` or
:class:`pandas.Series`.
See docstring of :func:`ScmRun.__init__` for details.
Parameters
----------
df
Data to format.
Returns
-------
:class:`pandas.DataFrame`, :class:`pandas.DataFrame`
First dataframe is the data. Second dataframe is metadata.
Raises
------
ValueError
Not all required metadata columns are present or the time axis cannot be
understood
"""
if isinstance(df, pd.Series):
df = df.to_frame()
# reset the index if meaningful entries are included there
if list(df.index.names) != [None]:
df.reset_index(inplace=True)
if not set(required_cols).issubset(set(df.columns)):
missing = list(set(required_cols) - set(df.columns))
raise MissingRequiredColumnError(missing)
# check whether data in wide or long format
if "value" in df.columns:
df, meta = _format_long_data(df, required_cols)
else:
df, meta = _format_wide_data(df, required_cols)
return df, meta
def _format_long_data(df, required_cols):
# check if time column is given as `year` (int) or `time` (datetime)
cols = set(df.columns)
if "year" in cols and "time" not in cols:
time_col = "year"
elif "time" in cols and "year" not in cols:
time_col = "time"
else:
msg = "invalid time format, must have either `year` or `time`!"
raise ValueError(msg)
required_cols = list(required_cols)
extra_cols = list(set(cols) - set(required_cols + [time_col, "value"]))
df = df.pivot_table(columns=required_cols + extra_cols, index=time_col).value
meta = df.columns.to_frame(index=None)
df.columns = meta.index
return df, meta
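# Illustrative sketch of the long-to-wide pivot performed in
# _format_long_data, using a tiny hand-written frame.
def _example_long_to_wide():
    long_df = pd.DataFrame(
        {
            "variable": ["Emissions|CO2", "Emissions|CO2"],
            "unit": ["GtC / yr", "GtC / yr"],
            "year": [2010, 2020],
            "value": [9.0, 10.0],
        }
    )
    wide = long_df.pivot_table(columns=["variable", "unit"], index="year").value
    meta = wide.columns.to_frame(index=None)
    wide.columns = meta.index
    return wide, meta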
def _format_wide_data(df, required_cols):
cols = set(df.columns) - set(required_cols)
time_cols, extra_cols = False, []
for i in cols:
# if in wide format, check if columns are years (int) or datetime
if isinstance(i, (dt.datetime, cftime.datetime)):
time_cols = True
else:
try:
float(i)
time_cols = True
except (ValueError, TypeError):
try:
try:
# most common format
dt.datetime.strptime(i, "%Y-%m-%d %H:%M:%S")
except ValueError:
# this is super slow so avoid if possible
parser.parse(str(i)) # if no ValueError, this is datetime
time_cols = True
except ValueError:
extra_cols.append(i) # some other string
if not time_cols:
msg = (
"invalid column format, must contain some time (int, float or datetime) "
"columns!"
)
raise ValueError(msg)
all_cols = set(tuple(required_cols) + tuple(extra_cols))
all_cols = list(all_cols)
df_out = df.drop(all_cols, axis="columns").T
df_out.index.name = "time"
meta = df[all_cols].set_index(df_out.columns)
return df_out, meta
def _from_ts(
df: Any,
required_cols: Tuple[str],
index: Any = None,
**columns: Union[str, bool, float, int, List],
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Prepare data to initialize :class:`ScmRun <scmdata.run.ScmRun>` from wide timeseries.
See docstring of :func:`ScmRun.__init__` for details.
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
First dataframe is the data. Second dataframe is metadata
Raises
------
ValueError
Not all required columns are present
"""
if not isinstance(df, pd.DataFrame):
df = pd.DataFrame(df)
if index is not None:
if isinstance(index, np.ndarray):
df.index = TimePoints(index).to_index()
elif isinstance(index, TimePoints):
df.index = index.to_index()
else:
df.index = index
# format columns to lower-case and check that all required columns exist
if not set(required_cols).issubset(columns.keys()):
missing = list(set(required_cols) - set(columns.keys()))
raise MissingRequiredColumnError(missing)
df.index.name = "time"
num_ts = len(df.columns)
for c_name, col in columns.items():
col_list = (
[col] if isinstance(col, str) or not isinstance(col, Iterable) else col
)
if len(col_list) == num_ts:
continue
if len(col_list) != 1:
error_msg = (
"Length of column '{}' is incorrect. It should be length "
"1 or {}".format(c_name, num_ts)
)
raise ValueError(error_msg)
columns[c_name] = col_list * num_ts
meta = pd.DataFrame(columns, index=df.columns)
return df, meta
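# Illustrative sketch of how _from_ts broadcasts length-1 metadata: single
# "variable", "unit" and "scenario" entries are repeated for every timeseries,
# while "model" is given per column. All values here are made up.
def _example_from_ts_broadcast():
    data = np.arange(6).reshape(3, 2)  # (n_times, n_series)
    df, meta = _from_ts(
        data,
        required_cols=("variable", "unit"),
        index=np.array([2010, 2020, 2030]),
        variable=["Primary Energy"],
        unit=["EJ/yr"],
        scenario=["ssp119"],
        model=["model_a", "model_b"],
    )
    return df, meta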
class BaseScmRun(OpsMixin): # pylint: disable=too-many-public-methods
"""
Base class of a data container for timeseries data
"""
required_cols = ("variable", "unit")
"""
Required metadata columns
This is the bare minimum columns which are expected. Attempting to create a run
without the metadata columns specified by :attr:`required_cols` will raise a
MissingRequiredColumnError
"""
data_hierarchy_separator = HIERARCHY_SEPARATOR
"""
str: String used to define different levels in our data hierarchies.
By default we follow pyam and use "|". In such a case, emissions of |CO2| for
energy from coal would be "Emissions|CO2|Energy|Coal".
"""
def __init__(
self,
data: Any,
index: Any = None,
columns: Optional[Union[Dict[str, list], Dict[str, str]]] = None,
metadata: Optional[MetadataType] = None,
copy_data: bool = False,
**kwargs: Any,
):
"""
Initialize the container with timeseries data.
Parameters
----------
data: Union[ScmRun, IamDataFrame, pd.DataFrame, np.ndarray, str]
If a :class:`ScmRun <scmdata.run.ScmRun>` object is provided, then a new
:class:`ScmRun <scmdata.run.ScmRun>` is created with a copy of the values and metadata from :obj:
`data`.
A :class:`pandas.DataFrame` with IAMC-format data columns (the result from
:func:`ScmRun.timeseries()`) can be provided without any additional
:obj:`columns` and :obj:`index` information.
If a numpy array of timeseries data is provided, :obj:`columns` and
:obj:`index` must also be specified. The shape of the numpy array should be
``(n_times, n_series)`` where `n_times` is the number of timesteps and
`n_series` is the number of time series.
If a string is passed, data will be attempted to be read from file.
Currently, reading from CSV, gzipped CSV and Excel formatted files is
supported.
index: np.ndarray
If :obj:`index` is not ``None``, then the :obj:`index` is used as the timesteps
for run. All timeseries in the run use the same set of timesteps.
            The values will be attempted to be converted to :class:`numpy.datetime64[s]` values.
Possible input formats include :
* :class:`datetime.datetime`
* :obj:`int` Start of year
* :obj:`float` Decimal year
* :obj:`str` Uses :func:`dateutil.parser`. Slow and should be avoided if possible
            If :obj:`index` is ``None``, then the time index will be obtained from the
:obj:`data` if possible.
columns
If None, ScmRun will attempt to infer the values from the source.
Otherwise, use this dict to write the metadata for each timeseries in data.
For each metadata key (e.g. "model", "scenario"), an array of values (one
per time series) is expected. Alternatively, providing a list of length 1
applies the same value to all timeseries in data. For example, if you had
three timeseries from 'rcp26' for 3 different models 'model', 'model2' and
'model3', the column dict would look like either 'col_1' or 'col_2':
.. code:: python
>>> col_1 = {
"scenario": ["rcp26"],
"model": ["model1", "model2", "model3"],
"region": ["unspecified"],
"variable": ["unspecified"],
"unit": ["unspecified"]
}
>>> col_2 = {
"scenario": ["rcp26", "rcp26", "rcp26"],
"model": ["model1", "model2", "model3"],
"region": ["unspecified"],
"variable": ["unspecified"],
"unit": ["unspecified"]
}
>>> assert pd.testing.assert_frame_equal(
ScmRun(d, columns=col_1).meta,
ScmRun(d, columns=col_2).meta
)
metadata:
Optional dictionary of metadata for instance as a whole.
This can be used to store information such as the longer-form information
about a particular dataset, for example, dataset description or DOIs.
Defaults to an empty :obj:`dict` if no default metadata are provided.
copy_data: bool
If True, an explicit copy of data is performed.
.. note::
The copy can be very expensive on large timeseries and should only be needed
in cases where the original data is manipulated.
**kwargs:
Additional parameters passed to :func:`_read_file` to read files
Raises
------
ValueError
* If you try to load from multiple files at once. If you wish to do this,
please use :func:`scmdata.run.run_append` instead.
* Not specifying :obj:`index` and :obj:`columns` if :obj:`data` is a
:class:`numpy.ndarray`
        :class:`scmdata.errors.MissingRequiredColumnError`
If metadata for :attr:`required_cols` is not found
TypeError
Timeseries cannot be read from :obj:`data`
"""
if isinstance(data, ScmRun):
self._df = data._df.copy() if copy_data else data._df
self._meta = data._meta
self._time_points = TimePoints(data.time_points.values)
if metadata is None:
metadata = data.metadata.copy()
else:
if copy_data and hasattr(data, "copy"):
data = data.copy()
self._init_timeseries(data, index, columns, copy_data=copy_data, **kwargs)
if self._duplicated_meta():
raise NonUniqueMetadataError(self.meta)
self.metadata = metadata.copy() if metadata is not None else {}
def _init_timeseries(
self,
data,
index: Any = None,
columns: Optional[Dict[str, list]] = None,
copy_data=False,
**kwargs: Any,
):
if isinstance(data, np.ndarray):
if columns is None:
raise ValueError("`columns` argument is required")
if index is None:
raise ValueError("`index` argument is required")
if columns is not None:
(_df, _meta) = _from_ts(
data, index=index, required_cols=self.required_cols, **columns
)
elif isinstance(data, (pd.DataFrame, pd.Series)):
(_df, _meta) = _format_data(data, self.required_cols)
elif (IamDataFrame is not None) and isinstance(data, IamDataFrame):
(_df, _meta) = _format_data(
data.data.copy() if copy_data else data.data, self.required_cols
)
else:
if not isinstance(data, str):
if isinstance(data, list) and isinstance(data[0], str):
raise ValueError(
"Initialising from multiple files not supported, "
"use `scmdata.run.ScmRun.append()`"
)
error_msg = "Cannot load {} from {}".format(type(self), type(data))
raise TypeError(error_msg)
(_df, _meta) = _read_file(data, required_cols=self.required_cols, **kwargs)
if _df.index.duplicated().any():
raise DuplicateTimesError(_df.index)
# use :class:`TimePoints` to sort times before continuing
_df.index = TimePoints(_df.index.values).to_index()
_df = _df.sort_index()
_df = _df.astype(float)
self._df = _df
# set time points using the sorted times
self._time_points = TimePoints(_df.index.values)
self._meta = pd.MultiIndex.from_frame(_meta.astype("category"))
def copy(self):
"""
Return a :func:`copy.deepcopy` of self.
Also creates copies the underlying Timeseries data
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
:func:`copy.deepcopy` of ``self``
"""
ret = copy.copy(self)
ret._df = self._df.copy()
ret._meta = self._meta.copy()
ret.metadata = copy.copy(self.metadata)
return ret
def __len__(self) -> int:
"""
Get the number of timeseries.
"""
return self._df.shape[1]
def __getitem__(self, key: Any) -> Any:
"""
Get item of self with helpful direct access.
Provides direct access to "time", "year" as well as the columns in :attr:`meta`.
If key is anything else, the key will be applied to :attr:`_data`.
"""
_key_check = (
[key] if isinstance(key, str) or not isinstance(key, Iterable) else key
)
if key == "time":
return pd.Series(self._time_points.to_index(), dtype="object")
if key == "year":
return pd.Series(self._time_points.years())
if set(_key_check).issubset(self.meta_attributes):
try:
return self._meta_column(key).astype(
self._meta_column(key).cat.categories.dtype
)
except ValueError:
return self._meta_column(key).astype(float)
raise KeyError("[{}] is not in metadata".format(key))
def __setitem__(
self, key: str, value: Union[np.ndarray, list, int, float, str]
) -> Any:
"""
Update metadata
Notes
-----
        If changes to meta values are applied to a filtered subset, the changes will be reflected
in the original :class:`ScmRun <scmdata.run.ScmRun>` object.
.. code:: python
>>> df
<scmdata.ScmRun (timeseries: 3, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
>>> df["climate_model"] = ["a_model", "a_model", "b_model"]
>>> df
<scmdata.ScmRun (timeseries: 3, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
2 a_iam a_scenario2 World Primary Energy EJ/yr b_model
>>> df2 = df.filter(variable="Primary Energy")
>>> df2["pe_only"] = True
>>> df2
<scmdata.ScmRun (timeseries: 2, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model pe_only
0 a_iam a_scenario World Primary Energy EJ/yr a_model True
2 a_iam a_scenario2 World Primary Energy EJ/yr b_model True
>>> df
<scmdata.ScmRun (timeseries: 3, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model pe_only
0 a_iam a_scenario World Primary Energy EJ/yr a_model True
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model NaN
2 a_iam a_scenario2 World Primary Energy EJ/yr b_model True
Parameters
----------
key
Column name
value
Values to write
If a list of values is provided, then the length of that :obj:`value` must
be the same as the number of timeseries
Raises
------
ValueError
If the length of :obj:`meta` is inconsistent with the number of timeseries
"""
meta = np.atleast_1d(value)
if key == "time":
self._time_points = TimePoints(meta)
self._df.index = self._time_points.to_index()
else:
if len(meta) == 1:
new_meta = self._meta.to_frame()
new_meta[key] = meta[0]
self._meta = pd.MultiIndex.from_frame(new_meta.astype("category"))
elif len(meta) == len(self):
new_meta_index = self._meta.to_frame(index=False)
new_meta_index[key] = pd.Series(meta, dtype="category")
self._meta = pd.MultiIndex.from_frame(new_meta_index)
else:
msg = (
"Invalid length for metadata, `{}`, must be 1 or equal to the "
"number of timeseries, `{}`"
)
raise ValueError(msg.format(len(meta), len(self)))
if self._duplicated_meta():
raise NonUniqueMetadataError(self.meta)
def __repr__(self):
def _indent(s):
lines = ["\t" + line for line in s.split("\n")]
return "\n".join(lines)
meta_str = _indent(self.meta.__repr__())
time_str = [
"Start: {}".format(self.time_points.values[0]),
"End: {}".format(self.time_points.values[-1]),
]
time_str = _indent("\n".join(time_str))
return "<{} (timeseries: {}, timepoints: {})>\nTime:\n{}\nMeta:\n{}".format(
self.__class__.__name__,
len(self),
len(self.time_points),
time_str,
meta_str,
)
def _binary_op(
self, other, f, reflexive=False, **kwargs,
) -> Callable[..., "ScmRun"]:
if isinstance(other, ScmRun):
return NotImplemented
is_scalar = isinstance(other, (numbers.Number, pint.Quantity))
if not is_scalar:
other_ndim = len(other.shape)
if other_ndim == 1:
if other.shape[0] != self.shape[1]:
raise ValueError(
"only vectors with the same number of timesteps "
"as self ({}) are supported".format(self.shape[1])
)
else:
raise ValueError(
"operations with {}d data are not supported".format(other_ndim)
)
def _perform_op(df):
if isinstance(other, pint.Quantity):
try:
data = df.values * ur(df.get_unique_meta("unit", True))
use_pint = True
except KeyError: # pragma: no cover # emergency valve
raise KeyError(
"No `unit` column in your metadata, cannot perform operations "
"with pint quantities"
)
else:
data = df.values
use_pint = False
res = []
for v in data:
if not reflexive:
res.append(f(v, other))
else:
res.append(f(other, v))
res = np.vstack(res)
if use_pint:
df._df.values[:] = res.magnitude.T
df["unit"] = str(res.units)
else:
df._df.values[:] = res.T
return df
return self.copy().groupby("unit").map(_perform_op)
def _unary_op(self, f, *args, **kwargs) -> Callable[..., "ScmRun"]:
df = self.copy()
res = [f(v) for v in df.values]
res = np.vstack(res)
df._df.values[:] = res.T
return df
def drop_meta(self, columns: Union[list, str], inplace: Optional[bool] = False):
"""
Drop meta columns out of the Run
Parameters
----------
columns
The column or columns to drop
inplace
If True, do operation inplace and return None.
Raises
------
KeyError
If any of the columns do not exist in the meta :class:`DataFrame`
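Examples
--------
Illustrative call, assuming ``run`` has a ``"climate_model"`` metadata column
and the remaining metadata still identifies each timeseries uniquely:
>>> run.drop_meta("climate_model")
>>> run.drop_meta(["climate_model"], inplace=True)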
"""
if inplace:
ret = self
else:
ret = self.copy()
if isinstance(columns, str):
columns = [columns]
existing_cols = ret.meta_attributes
for c in columns:
if c not in existing_cols:
raise KeyError(c)
if c in self.required_cols:
raise MissingRequiredColumnError([c])
for c in columns:
ret._meta = ret._meta.droplevel(c)
if ret._duplicated_meta():
raise NonUniqueMetadataError(ret.meta)
if not inplace:
return ret
@property
def meta_attributes(self):
"""
Get a list of all meta keys
Returns
-------
list
Sorted list of meta keys
"""
return sorted(list(self._meta.names))
@property
def time_points(self):
"""
Time points of the data
Returns
-------
:class:`scmdata.time.TimePoints`
"""
return self._time_points
def timeseries(
self, meta=None, check_duplicated=True, time_axis=None, drop_all_nan_times=False
):
"""
Return the data with metadata as a :class:`pandas.DataFrame`.
Parameters
----------
meta : list[str]
The list of meta columns that will be included in the output's
MultiIndex. If None (default), then all metadata will be used.
check_duplicated : bool
If True, an exception is raised if any of the timeseries have
duplicated metadata
time_axis : {None, "year", "year-month", "days since 1970-01-01", "seconds since 1970-01-01"}
See :func:`long_data` for a description of the options.
drop_all_nan_times : bool
Should time points which contain only nan values be dropped? This operation is applied
after any transforms introduced by the value of ``time_axis``.
Returns
-------
:class:`pandas.DataFrame`
DataFrame with datetimes as columns and timeseries as rows.
Metadata is in the index.
Raises
------
:class:`NonUniqueMetadataError`
If the metadata are not unique between timeseries and
``check_duplicated`` is ``True``
NotImplementedError
The value of `time_axis` is not recognised
ValueError
The value of `time_axis` would result in columns which aren't unique
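Examples
--------
Illustrative calls on an :class:`ScmRun <scmdata.run.ScmRun>` named ``run``:
>>> run.timeseries()
>>> run.timeseries(time_axis="year", drop_all_nan_times=True)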
"""
df = self._df.T
_meta = self.meta if meta is None else self.meta[meta]
if check_duplicated and self._duplicated_meta(meta=_meta):
raise NonUniqueMetadataError(_meta)
if time_axis is None:
columns = self._time_points.to_index()
elif time_axis == "year":
columns = self._time_points.years()
elif time_axis == "year-month":
columns = (
self._time_points.years() + (self._time_points.months() - 0.5) / 12
)
elif time_axis == "days since 1970-01-01":
def calc_days(x):
ref = np.array(["1970-01-01"], dtype=_TARGET_DTYPE)[0]
return (x - ref).astype("timedelta64[D]")
columns = calc_days(self._time_points.values).astype(int)
elif time_axis == "seconds since 1970-01-01":
def calc_seconds(x):
ref = np.array(["1970-01-01"], dtype=_TARGET_DTYPE)[0]
return x - ref
columns = calc_seconds(self._time_points.values).astype(int)
else:
raise NotImplementedError("time_axis = '{}'".format(time_axis))
if len(np.unique(columns)) != len(columns):
raise ValueError(
"Ambiguous time values with time_axis = '{}'".format(time_axis)
)
df.columns = pd.Index(columns, name="time")
df.index = pd.MultiIndex.from_frame(_meta)
if drop_all_nan_times:
df = df.dropna(how="all", axis="columns")
return df
def _duplicated_meta(self, meta=None):
_meta = self._meta if meta is None else meta
return _meta.duplicated().any()
def long_data(self, time_axis=None):
"""
Return data in long form, particularly useful for plotting with seaborn
Parameters
----------
time_axis : {None, "year", "year-month", "days since 1970-01-01", "seconds since 1970-01-01"}
Time axis to use for the output's columns.
If ``None``, :class:`datetime.datetime` objects will be used.
If ``"year"``, the year of each time point will be used.
If ``"year-month"``, the year plus (month - 0.5) / 12 will be used.
If ``"days since 1970-01-01"``, the number of days since 1st Jan 1970
will be used (calculated using the :mod:`datetime` module).
If ``"seconds since 1970-01-01"``, the number of seconds since 1st Jan
1970 will be used (calculated using the :mod:`datetime` module).
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` containing the data in 'long form' (i.e. one observation
per row).
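Examples
--------
Illustrative use with seaborn (``"scenario"`` is an assumed metadata column):
>>> lng = run.long_data(time_axis="year")
>>> import seaborn as sns
>>> sns.lineplot(data=lng, x="time", y="value", hue="scenario")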
"""
out = self.timeseries(time_axis=time_axis).stack()
out.name = "value"
out = out.to_frame().reset_index()
return out
@property
def shape(self) -> tuple:
"""
Get the shape of the underlying data as ``(num_timeseries, num_timesteps)``
Returns
-------
tuple of int
"""
return self._df.T.shape
@property
def values(self) -> np.ndarray:
"""
Timeseries values without metadata
The values are returned such that each row is a different timeseries and
each column is a different time (although no time information is included
as a plain :class:`numpy.ndarray` is returned).
Returns
-------
np.ndarray
The array in the same shape as :meth:`ScmRun.shape`, that is
``(num_timeseries, num_timesteps)``.
"""
return self._df.values.T
@property
def empty(self) -> bool:
"""
Indicate whether :class:`ScmRun <scmdata.run.ScmRun>` is empty i.e. contains no data
Returns
-------
bool
If :class:`ScmRun <scmdata.run.ScmRun>` is empty, return ``True``, if not return ``False``
"""
return np.equal(len(self), 0)
@property
def meta(self) -> pd.DataFrame:
"""
Metadata
"""
df = pd.DataFrame(
self._meta.to_list(), columns=self._meta.names, index=self._df.columns
)
return df[sorted(df.columns)]
def _meta_column(self, col) -> pd.Series:
out = self._meta.get_level_values(col)
return pd.Series(out, name=col, index=self._df.columns)
def filter(
self,
keep: bool = True,
inplace: bool = False,
log_if_empty: bool = True,
**kwargs: Any,
):
"""
Return a filtered ScmRun (i.e., a subset of the data).
.. code:: python
>>> df
<scmdata.ScmRun (timeseries: 3, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
[3 rows x 7 columns]
>>> df.filter(scenario="a_scenario")
<scmdata.ScmRun (timeseries: 2, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
[2 rows x 7 columns]
>>> df.filter(scenario="a_scenario", keep=False)
<scmdata.ScmRun (timeseries: 1, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
[1 rows x 7 columns]
>>> df.filter(level=1)
<scmdata.ScmRun (timeseries: 2, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
[2 rows x 7 columns]
>>> df.filter(year=range(2000, 2011))
<scmdata.ScmRun (timeseries: 3, timepoints: 2)>
Time:
Start: 2005-01-01T00:00:00
End: 2010-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
[3 rows x 7 columns]
Parameters
----------
keep
If True, keep all timeseries satisfying the filters, otherwise drop all the
timeseries satisfying the filters
inplace
If True, do operation inplace and return None
log_if_empty
If ``True``, log a warning level message if the result is empty.
**kwargs
Argument names are keys with which to filter, values are used to do the
filtering. Filtering can be done on:
- all metadata columns with strings, "*" can be used as a wildcard in search
strings
- 'level': the maximum "depth" of IAM variables (number of hierarchy levels,
excluding the strings given in the 'variable' argument)
- 'time': takes a :class:`datetime.datetime` or list of
:class:`datetime.datetime`'s
TODO: default to np.datetime64
- 'year', 'month', 'day', hour': takes an :obj:`int` or list of
:obj:`int`'s ('month' and 'day' also accept :obj:`str` or list of
:obj:`str`)
If ``regexp=True`` is included in :obj:`kwargs` then the pseudo-regexp
syntax in :func:`pattern_match` is disabled.
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
If not ``inplace``, return a new instance with the filtered data.
"""
ret = copy.copy(self) if not inplace else self
if len(ret):
_keep_times, _keep_rows = self._apply_filters(kwargs)
if not keep and sum(~_keep_rows) and sum(~_keep_times):
raise ValueError(
"If keep==False, filtering cannot be performed on the temporal axis "
"and with metadata at the same time"
)
reduce_times = (~_keep_times).sum() > 0
reduce_rows = (~_keep_rows).sum() > 0
if not keep:
if reduce_times:
_keep_times = ~_keep_times
if reduce_rows:
_keep_rows = ~_keep_rows
if not reduce_rows and not reduce_times:
_keep_times = _keep_times * False
_keep_rows = _keep_rows * False
ret._df = ret._df.loc[_keep_times, _keep_rows]
ret._meta = ret._meta[_keep_rows]
ret["time"] = self.time_points.values[_keep_times]
if log_if_empty and ret.empty:
_logger.warning("Filtered ScmRun is empty!", stack_info=True)
if not inplace:
return ret
return None
# pylint doesn't recognise ',' in returns type definition
def _apply_filters( # pylint: disable=missing-return-doc
self, filters: Dict
) -> Tuple[np.ndarray, np.ndarray]:
"""
Determine rows to keep in data for given set of filters.
Parameters
----------
filters
Dictionary of filters ``({col: values})``; uses a pseudo-regexp syntax by
default but if ``filters["regexp"]`` is ``True``, regexp is used directly.
Returns
-------
:class:`numpy.ndarray` of :class:`bool`, :class:`numpy.ndarray` of :class:`bool`
Two boolean :class:`numpy.ndarray`'s. The first contains the columns to keep
(i.e. which time points to keep). The second contains the rows to keep (i.e.
which metadata matched the filters).
Raises
------
ValueError
Filtering cannot be performed on requested column
"""
regexp = filters.pop("regexp", False)
keep_ts = np.array([True] * len(self.time_points))
keep_meta = np.array([True] * len(self))
# filter by columns and list of values
for col, values in filters.items():
if col in self._meta.names:
if col == "variable":
level = filters["level"] if "level" in filters else None
else:
level = None
keep_meta &= pattern_match(
self._meta.get_level_values(col),
values,
level=level,
regexp=regexp,
separator=self.data_hierarchy_separator,
)
elif col == "level":
if "variable" not in filters.keys():
keep_meta &= pattern_match(
self._meta.get_level_values("variable"),
"*",
level=values,
regexp=regexp,
separator=self.data_hierarchy_separator,
)
# else do nothing as level handled in variable filtering
elif col == "year":
keep_ts &= years_match(self._time_points.years(), values)
elif col == "month":
keep_ts &= month_match(self._time_points.months(), values)
elif col == "day":
keep_ts &= self._day_match(values)
elif col == "hour":
keep_ts &= hour_match(self._time_points.hours(), values)
elif col == "time":
keep_ts &= datetime_match(self._time_points.values, values)
else:
raise ValueError("filter by `{}` not supported".format(col))
return keep_ts, keep_meta
def _day_match(self, values):
if isinstance(values, str):
wday = True
elif isinstance(values, list) and isinstance(values[0], str):
wday = True
else:
wday = False
if wday:
days = self._time_points.weekdays()
else: # ints or list of ints
days = self._time_points.days()
return day_match(days, values)
def head(self, *args, **kwargs):
"""
Return head of :func:`self.timeseries()`.
Parameters
----------
*args
Passed to :func:`self.timeseries().head()`
**kwargs
Passed to :func:`self.timeseries().head()`
Returns
-------
:class:`pandas.DataFrame`
Head of :func:`self.timeseries()`
"""
return self.timeseries().head(*args, **kwargs)
def tail(self, *args: Any, **kwargs: Any) -> pd.DataFrame:
"""
Return tail of :func:`self.timeseries()`.
Parameters
----------
*args
Passed to :func:`self.timeseries().tail()`
**kwargs
Passed to :func:`self.timeseries().tail()`
Returns
-------
:class:`pandas.DataFrame`
Tail of :func:`self.timeseries()`
"""
return self.timeseries().tail(*args, **kwargs)
def get_unique_meta(
self, meta: str, no_duplicates: Optional[bool] = False,
) -> Union[List[Any], Any]:
"""
Get unique values in a metadata column.
Parameters
----------
meta
Column to retrieve metadata for
no_duplicates
Should I raise an error if there is more than one unique value in the
metadata column?
Raises
------
ValueError
There is more than one unique value in the metadata column and
``no_duplicates`` is ``True``.
KeyError
If a ``meta`` column does not exist in the run's metadata
Returns
-------
[List[Any], Any]
List of unique metadata values. If ``no_duplicates`` is ``True`` the
metadata value will be returned (rather than a list).
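Examples
--------
Illustrative calls (``"scenario"`` is an assumed metadata column; the second
call only succeeds if the run holds a single unit):
>>> run.get_unique_meta("scenario")
>>> run.get_unique_meta("unit", no_duplicates=True)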
"""
vals = self._meta.get_level_values(meta).unique().to_list()
if no_duplicates:
if len(vals) != 1:
raise ValueError(
"`{}` column is not unique (found values: {})".format(meta, vals)
)
return vals[0]
return vals
def interpolate(
self,
target_times: Union[np.ndarray, List[Union[dt.datetime, int]]],
interpolation_type: str = "linear",
extrapolation_type: str = "linear",
):
"""
Interpolate the data onto a new time frame.
Parameters
----------
target_times
Time grid onto which to interpolate
interpolation_type: str
Interpolation type. Options are 'linear'
extrapolation_type: str or None
Extrapolation type. Options are None, 'linear' or 'constant'
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
A new :class:`ScmRun <scmdata.run.ScmRun>` containing the data interpolated onto the
:obj:`target_times` grid
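Examples
--------
Illustrative call, interpolating onto the start of each year from 2005 to 2015:
>>> import datetime as dt
>>> run.interpolate([dt.datetime(y, 1, 1) for y in range(2005, 2016)])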
"""
# pylint: disable=protected-access
target_times = np.asarray(target_times, dtype="datetime64[s]")
res = self.copy()
target_times = TimePoints(target_times)
timeseries_converter = TimeseriesConverter(
self.time_points.values,
target_times.values,
interpolation_type=interpolation_type,
extrapolation_type=extrapolation_type,
)
target_data = np.zeros((len(target_times), len(res)))
# TODO: Extend TimeseriesConverter to handle 2d inputs
for i in range(len(res)):
target_data[:, i] = timeseries_converter.convert_from(
res._df.iloc[:, i].values
)
res._df = pd.DataFrame(
target_data, columns=res._df.columns, index=target_times.to_index()
)
res._time_points = target_times
return res
def resample(self, rule: str = "AS", **kwargs: Any):
"""
Resample the time index of the timeseries data onto a custom grid.
This helper function allows for values to be easily interpolated onto annual or
monthly timesteps using the rules='AS' or 'MS' respectively. Internally, the
interpolate function performs the regridding.
Parameters
----------
rule
See the pandas `user guide
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`_
for a list of options. Note that Business-related offsets such as
"BusinessDay" are not supported.
**kwargs
Other arguments to pass through to :func:`interpolate`
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
New :class:`ScmRun <scmdata.run.ScmRun>` instance on a new time index
Examples
--------
Resample a run to annual values
>>> scm_df = ScmRun(
... pd.Series([1, 2, 10], index=(2000, 2001, 2009)),
... columns={
... "model": ["a_iam"],
... "scenario": ["a_scenario"],
... "region": ["World"],
... "variable": ["Primary Energy"],
... "unit": ["EJ/y"],
... }
... )
>>> scm_df.timeseries().T
model a_iam
scenario a_scenario
region World
variable Primary Energy
unit EJ/y
year
2000 1
2001 2
2009 10
An annual timeseries can then be created by interpolating to the start of years
using the rule 'AS'.
>>> res = scm_df.resample('AS')
>>> res.timeseries().T
model a_iam
scenario a_scenario
region World
variable Primary Energy
unit EJ/y
time
2000-01-01 00:00:00 1.000000
2001-01-01 00:00:00 2.001825
2002-01-01 00:00:00 3.000912
2003-01-01 00:00:00 4.000000
2004-01-01 00:00:00 4.999088
2005-01-01 00:00:00 6.000912
2006-01-01 00:00:00 7.000000
2007-01-01 00:00:00 7.999088
2008-01-01 00:00:00 8.998175
2009-01-01 00:00:00 10.00000
>>> m_df = scm_df.resample('MS')
>>> m_df.timeseries().T
model a_iam
scenario a_scenario
region World
variable Primary Energy
unit EJ/y
time
2000-01-01 00:00:00 1.000000
2000-02-01 00:00:00 1.084854
2000-03-01 00:00:00 1.164234
2000-04-01 00:00:00 1.249088
2000-05-01 00:00:00 1.331204
2000-06-01 00:00:00 1.416058
2000-07-01 00:00:00 1.498175
2000-08-01 00:00:00 1.583029
2000-09-01 00:00:00 1.667883
...
2008-05-01 00:00:00 9.329380
2008-06-01 00:00:00 9.414234
2008-07-01 00:00:00 9.496350
2008-08-01 00:00:00 9.581204
2008-09-01 00:00:00 9.666058
2008-10-01 00:00:00 9.748175
2008-11-01 00:00:00 9.833029
2008-12-01 00:00:00 9.915146
2009-01-01 00:00:00 10.000000
[109 rows x 1 columns]
Note that the values do not fall exactly on integer values as not all years are
exactly the same length.
References
----------
See the pandas documentation for
`resample <http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.resample.html>`_
for more information about possible arguments.
"""
orig_dts = self["time"]
target_dts = generate_range(
orig_dts.iloc[0], orig_dts.iloc[-1], to_offset(rule)
)
return self.interpolate(list(target_dts), **kwargs)
def time_mean(self, rule: str):
"""
Take time mean of self
Note that this method will not copy the ``metadata`` attribute to the returned
value.
Parameters
----------
rule : ["AC", "AS", "A"]
How to take the time mean. The names reflect the pandas
`user guide <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`_
where they can, but only the options
given above are supported. For clarity, if ``rule`` is ``'AC'``, then the
mean is an annual mean i.e. each time point in the result is the mean of
all values for that particular year. If ``rule`` is ``'AS'``, then the
mean is an annual mean centred on the beginning of the year i.e. each time
point in the result is the mean of all values from July 1st in the
previous year to June 30 in the given year. If ``rule`` is ``'A'``, then
the mean is an annual mean centred on the end of the year i.e. each time
point in the result is the mean of all values from July 1st of the given
year to June 30 in the next year.
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
The time mean of ``self``.
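Examples
--------
Illustrative calls on an :class:`ScmRun <scmdata.run.ScmRun>` named ``run``:
>>> run.time_mean("AC")  # calendar-year means, time-stamped mid-year
>>> run.time_mean("AS")  # annual means centred on 1 January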
"""
if rule == "AS":
def group_annual_mean_beginning_of_year(x):
if x.month <= 6:
return x.year
return x.year + 1
ts_resampled = (
self.timeseries()
.T.groupby(group_annual_mean_beginning_of_year)
.mean()
.T
)
ts_resampled.columns = ts_resampled.columns.map(
lambda x: dt.datetime(x, 1, 1)
)
return type(self)(ts_resampled)
if rule == "AC":
def group_annual_mean(x):
return x.year
ts_resampled = self.timeseries().T.groupby(group_annual_mean).mean().T
ts_resampled.columns = ts_resampled.columns.map(
lambda x: dt.datetime(x, 7, 1)
)
return type(self)(ts_resampled)
if rule == "A":
def group_annual_mean_end_of_year(x):
if x.month >= 7:
return x.year
return x.year - 1
ts_resampled = (
self.timeseries().T.groupby(group_annual_mean_end_of_year).mean().T
)
ts_resampled.columns = ts_resampled.columns.map(
lambda x: dt.datetime(x, 12, 31)
)
return type(self)(ts_resampled)
raise ValueError("`rule` = `{}` is not supported".format(rule))
def process_over(
self,
cols: Union[str, List[str]],
operation: Union[str, ApplyCallable],
na_override=-1e6,
op_cols=None,
as_run=False,
**kwargs: Any,
) -> pd.DataFrame:
"""
Process the data over the input columns.
Parameters
----------
cols
Columns to perform the operation on. The timeseries will be grouped by all
other columns in :attr:`meta`.
operation : str or func
The operation to perform.
If a string is provided, the equivalent pandas groupby function is used. Note
that not all groupby functions are available as some do not make sense for
this particular application. Additional information about the arguments for
the pandas groupby functions can be found in the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html>`_.
If a function is provided, it will be applied to each group. The function must
take a dataframe as its first argument and return a DataFrame, Series or scalar.
Note that quantile means the value of the data at a given point in the cumulative
distribution of values at each point in the timeseries, for each timeseries
once the groupby is applied. As a result, using ``q=0.5`` is the same as
taking the median and not the same as taking the mean/average.
na_override: [int, float]
Convert any nan value in the timeseries meta to this value during processing.
The meta values are converted back to nan's before the run is returned. This
should not need to be changed unless the existing metadata clashes with the
default na_override value.
This functionality is disabled if na_override is None, but may result in incorrect
results if the timeseries meta includes any nan's.
op_cols: dict of str: str
Dictionary containing any columns that should be overridden after processing.
If a required column from :class:`scmdata.ScmRun` is specified in ``cols`` and
``as_run=True``, an override must be provided for that column in ``op_cols``
otherwise the conversion to :class:`scmdata.ScmRun` will fail.
as_run: bool or subclass of BaseScmRun
If True, return the resulting timeseries as an :class:`scmdata.ScmRun` object,
otherwise if False, a :class:`pandas.DataFrame` or :class:`pandas.Series` is
returned (depending on the nature of the operation). Some operations may not be
able to be converted to a :class:`scmdata.ScmRun`. For example if the operation
returns scalar values rather than timeseries.
If a class is provided, the return value will be cast to this class.
**kwargs
Keyword arguments to pass ``operation`` (or the pandas operation if ``operation``
is a string)
Returns
-------
:class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`scmdata.ScmRun`
The result of ``operation``, grouped by all columns in :attr:`meta`
other than :obj:`cols`
Raises
------
ValueError
If the operation is not an allowed operation
If the value of na_override clashes with any existing metadata
If ``operation`` produces a :class:`pandas.Series`, but ``as_run`` is True
If ``as_run`` is not True, False or a subclass of :class:`scmdata.run.BaseScmRun`
:class:`scmdata.errors.MissingRequiredColumnError`
If ``as_run`` is not False and the result does not have the required metadata
to convert to an :class:`ScmRun <scmdata.ScmRun>`.
This can be resolved by specifying additional metadata via ``op_cols``
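Examples
--------
Illustrative calls (``"scenario"`` is an assumed metadata column of ``run``):
>>> run.process_over("scenario", "mean")
>>> run.process_over("scenario", "quantile", q=0.05)
>>> run.process_over("scenario", "mean", op_cols={"scenario": "mean"}, as_run=True)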
"""
cols = [cols] if isinstance(cols, str) else cols
ts = self.timeseries()
if na_override is not None:
ts_idx = ts.index.to_frame()
if ts_idx[ts_idx == na_override].any().any():
raise ValueError(
"na_override clashes with existing meta: {}".format(na_override)
)
ts.index = pd.MultiIndex.from_frame(ts_idx.fillna(na_override))
group_cols = list(set(ts.index.names) - set(cols))
grouper = ts.groupby(group_cols)
# This is a subset of the available functions
# https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html
allowed_pd_ops = [
"count",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"first",
"last",
"max",
"mean",
"median",
"min",
"prod",
"rank",
"std",
"sum",
"var",
"quantile",
]
if isinstance(operation, str):
if operation not in allowed_pd_ops:
raise ValueError("invalid process_over operation")
grouper_func = getattr(grouper, operation)
res = grouper_func(**kwargs)
else:
res = grouper.apply(operation, **kwargs)
if op_cols is not None:
idx_df = res.index.to_frame()
for column_name in op_cols:
idx_df[column_name] = op_cols[column_name]
res.index = pd.MultiIndex.from_frame(idx_df)
if na_override is not None:
idx_df = res.index.to_frame()
idx_df[idx_df == na_override] = np.nan
res.index = pd.MultiIndex.from_frame(idx_df)
res = res.reorder_levels(sorted(res.index.names))
if as_run:
if isinstance(res, pd.Series):
raise ValueError("Cannot convert pd.Series to ScmRun")
if isinstance(as_run, bool):
Cls = self.__class__
elif issubclass(as_run, BaseScmRun):
Cls = as_run
else:
raise ValueError(
"Invalid value for as_run. Expected True, False or class based on scmdata.run.BaseScmRun"
)
return Cls(res, metadata=self.metadata)
else:
return res
def quantiles_over(
self,
cols: Union[str, List[str]],
quantiles: Union[str, List[float]],
**kwargs: Any,
) -> pd.DataFrame:
"""
Calculate quantiles of the data over the input columns.
Parameters
----------
cols
Columns to perform the operation on. The timeseries will be grouped by all
other columns in :attr:`meta`.
quantiles
The quantiles to calculate. This should be a list of quantiles to calculate
(quantile values between 0 and 1). ``quantiles`` can also include the strings
"median" or "mean" if these values are to be calculated.
**kwargs
Passed to :meth:`~ScmRun.process_over`.
Returns
-------
:class:`pandas.DataFrame`
The quantiles of the timeseries, grouped by all columns in :attr:`meta`
other than :obj:`cols`. Each calculated quantile is given a label which is
stored in the ``quantile`` column within the output index.
Raises
------
TypeError
``operation`` is included in ``kwargs``. The operation is inferred from ``quantiles``.
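Examples
--------
Illustrative call (``"ensemble_member"`` is an assumed metadata column over
which the quantiles are taken):
>>> run.quantiles_over("ensemble_member", quantiles=[0.05, 0.5, 0.95, "mean"])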
"""
if "operation" in kwargs:
raise TypeError(
"quantiles_over() does not take the keyword argument 'operation', the operations "
"are inferred from the 'quantiles' argument"
)
out = []
for quant in quantiles:
if quant == "median":
quantile_df = self.process_over(cols, "median")
elif quant == "mean":
quantile_df = self.process_over(cols, "mean")
else:
quantile_df = self.process_over(cols, "quantile", q=quant)
quantile_df["quantile"] = quant
out.append(quantile_df)
out = pd.concat(out).set_index("quantile", append=True)
return out
@staticmethod
def _check_groupby_input(v):
if len(v) == 1 and not isinstance(v[0], str):
v = tuple(v[0])
return v
def groupby(self, *group):
"""
Group the object by unique metadata
Enables iteration over groups of data. For example, to iterate over each
scenario in the object
.. code:: python
>>> for group in df.groupby("scenario"):
>>> print(group)
<scmdata.ScmRun (timeseries: 2, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
0 a_iam a_scenario World Primary Energy EJ/yr a_model
1 a_iam a_scenario World Primary Energy|Coal EJ/yr a_model
<scmdata.ScmRun (timeseries: 1, timepoints: 3)>
Time:
Start: 2005-01-01T00:00:00
End: 2015-01-01T00:00:00
Meta:
model scenario region variable unit climate_model
2 a_iam a_scenario2 World Primary Energy EJ/yr a_model
Parameters
----------
group: str or list of str
Columns to group by
Returns
-------
:class:`RunGroupBy`
See the documentation for :class:`RunGroupBy` for more information
"""
group = self._check_groupby_input(group)
return RunGroupBy(self, group)
def get_meta_columns_except(self, *not_group):
"""
Get columns in meta except a set
Parameters
----------
not_group: str or list of str
Columns to exclude from the grouping
Returns
-------
list
Meta columns except the ones supplied (sorted alphabetically)
"""
not_group = self._check_groupby_input(not_group)
group = sorted(tuple(set(self.meta.columns) - set(not_group)))
return group
def groupby_all_except(self, *not_group):
"""
Group the object by unique metadata apart from the input columns
In other words, the groups are determined by all columns in
``self.meta`` except for those in ``not_group``
Parameters
----------
not_group: str or list of str
Columns to exclude from the grouping
Returns
-------
:class:`RunGroupBy`
See the documentation for :class:`RunGroupBy` for more information
"""
group = self.get_meta_columns_except(not_group)
return RunGroupBy(self, group)
def convert_unit(
self,
unit: str,
context: Optional[str] = None,
inplace: bool = False,
**kwargs: Any,
):
"""
Convert the units of a selection of timeseries.
Uses :class:`scmdata.units.UnitConverter` to perform the conversion.
Parameters
----------
unit
Unit to convert to. This must be recognised by
:class:`~openscm.units.UnitConverter`.
context
Context to use for the conversion i.e. which metric to apply when performing
CO2-equivalent calculations. If ``None``, no metric will be applied and
CO2-equivalent calculations will raise :class:`DimensionalityError`.
inplace
If True, apply the conversion inplace and return None
**kwargs
Extra arguments which are passed to :meth:`~ScmRun.filter` to
limit the timeseries which are attempted to be converted. Defaults to
selecting the entire ScmRun, which will likely fail.
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
If :obj:`inplace` is not ``False``, a new :class:`ScmRun <scmdata.run.ScmRun>` instance
with the converted units.
Notes
-----
If ``context`` is not ``None``, then the context used for the conversion will
be checked against any existing metadata and, if the conversion is valid,
stored in the output's metadata.
Raises
------
ValueError
``"unit_context"`` is already included in ``self``'s :meth:`meta_attributes`
and it does not match ``context`` for the variables to be converted.
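Examples
--------
Illustrative call, assuming ``run`` contains "Primary Energy" timeseries in a
compatible energy-per-year unit (e.g. "EJ/yr"):
>>> run.convert_unit("PJ/yr", variable="Primary Energy")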
"""
# pylint: disable=protected-access
if inplace:
ret = self
else:
ret = self.copy()
to_convert_filtered = ret.filter(**kwargs, log_if_empty=False)
to_not_convert_filtered = ret.filter(**kwargs, keep=False, log_if_empty=False)
already_correct_unit = to_convert_filtered.filter(unit=unit, log_if_empty=False)
if (
"unit_context" in already_correct_unit.meta_attributes
and not already_correct_unit.empty
):
self._check_unit_context(already_correct_unit, context)
to_convert = to_convert_filtered.filter(
unit=unit, log_if_empty=False, keep=False
)
to_not_convert = run_append([to_not_convert_filtered, already_correct_unit])
if "unit_context" in to_convert.meta_attributes and not to_convert.empty:
self._check_unit_context(to_convert, context)
if context is not None:
to_convert["unit_context"] = context
if "unit_context" not in to_not_convert.meta_attributes and context is not None:
to_not_convert["unit_context"] = None
def apply_units(group):
orig_unit = group.get_unique_meta("unit", no_duplicates=True)
uc = UnitConverter(orig_unit, unit, context=context)
group._df.values[:] = uc.convert_from(group._df.values)
group["unit"] = unit
return group
ret = to_convert
if not to_convert.empty:
ret = ret.groupby("unit").map(apply_units)
ret = run_append([ret, to_not_convert], inplace=inplace)
if not inplace:
return ret
@staticmethod
def _check_unit_context(dat, context):
unit_context = dat.get_unique_meta("unit_context")
# check if contexts don't match, unless the context is nan
non_matching_contexts = len(unit_context) > 1 or unit_context[0] != context
if isinstance(unit_context[0], float):
non_matching_contexts &= not np.isnan(unit_context[0])
if non_matching_contexts:
raise ValueError(
"Existing unit conversion context(s), `{}`, doesn't match input "
"context, `{}`, drop `unit_context` metadata before doing "
"conversion".format(unit_context, context)
)
def relative_to_ref_period_mean(self, append_str=None, **kwargs):
"""
Return the timeseries relative to a given reference period mean.
The reference period mean is subtracted from all values in the input timeseries.
Parameters
----------
append_str
Deprecated
**kwargs
Arguments to pass to :func:`filter` to determine the data to be included in
the reference time period. See the docs of :func:`filter` for valid options.
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
New object containing the timeseries, adjusted to the reference period mean.
The reference period year bounds are stored in the meta columns
``"reference_period_start_year"`` and ``"reference_period_end_year"``.
Raises
------
NotImplementedError
``append_str`` is not ``None``
"""
if append_str is not None:
raise NotImplementedError("`append_str` is deprecated")
ts = self.timeseries()
# mypy confused by `inplace` default
ref_data = self.filter(**kwargs)
ref_period_mean = ref_data.timeseries().mean(axis="columns") # type: ignore
res = ts.sub(ref_period_mean, axis="rows")
res.reset_index(inplace=True)
res["reference_period_start_year"] = ref_data["year"].min()
res["reference_period_end_year"] = ref_data["year"].max()
return type(self)(res)
def append(
self,
other,
inplace: bool = False,
duplicate_msg: Union[str, bool] = True,
metadata: Optional[MetadataType] = None,
**kwargs: Any,
):
"""
Append additional data to the current data.
For details, see :func:`run_append`.
Parameters
----------
other
Data (in format which can be cast to :class:`ScmRun <scmdata.run.ScmRun>`) to append
inplace
If ``True``, append data in place and return ``None``. Otherwise, return a
new :class:`ScmRun <scmdata.run.ScmRun>` instance with the appended data.
duplicate_msg
If ``True``, raise a :class:`scmdata.errors.NonUniqueMetadataError` error
so the user can see the duplicate timeseries. If ``False``, take the average
and do not raise a warning or error. If ``"warn"``, raise a
warning if duplicate data is detected.
metadata
If not ``None``, override the metadata of the resulting :class:`ScmRun <scmdata.run.ScmRun>` with
``metadata``. Otherwise, the metadata for the runs are merged. In the case
where there are duplicate metadata keys, the values from the first run are
used.
**kwargs
Keywords to pass to :func:`ScmRun.__init__` when reading
:obj:`other`
Returns
-------
:class:`ScmRun <scmdata.run.ScmRun>`
If not :obj:`inplace`, return a new :class:`ScmRun <scmdata.run.ScmRun>` instance
containing the result of the append.
Raises
------
NonUniqueMetadataError
If the appending results in timeseries with duplicate metadata and
:attr:`duplicate_msg` is ``True``
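Examples
--------
Illustrative call (``other_run`` is assumed to be another
:class:`ScmRun <scmdata.run.ScmRun>` whose metadata does not duplicate ``run``'s):
>>> combined = run.append(other_run)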
"""
if not isinstance(other, ScmRun):
other = self.__class__(other, **kwargs)
return run_append(
[self, other],
inplace=inplace,
duplicate_msg=duplicate_msg,
metadata=metadata,
)
def append_timewise(
self, other, align_columns,
):
"""
Append timeseries along the time axis
Parameters
----------
other : :obj:`scmdata.ScmRun`
:obj:`scmdata.ScmRun` containing the timeseries to append
align_columns : list
Columns used to align ``other`` and ``self`` when joining
Returns
-------
:obj:`scmdata.ScmRun`
Result of joining ``self`` and ``other`` along the time axis
"""
ts_self = self.timeseries()
try:
ts_other = other.timeseries(meta=align_columns)
except NonUniqueMetadataError as exc:
error_msg = (
"Calling ``other.timeseries(meta=align_columns)`` must "
"result in umabiguous timeseries"
)
raise ValueError(error_msg) from exc
ts_other_aligned, ts_self_aligned = ts_other.align(ts_self)
ts_self_aligned = ts_self_aligned.dropna(how="all", axis="columns")
ts_other_aligned = ts_other_aligned.dropna(how="all", axis="columns")
# if ts_other_aligned.isnull().any(axis=1):
# warning?
out = pd.concat([ts_other_aligned, ts_self_aligned], axis=1)
from flask import render_template, flash, redirect, url_for,request
from flask_login import current_user, login_user, logout_user,login_required
from flask_paginate import Pagination, get_page_parameter, get_page_args
from werkzeug.urls import url_parse
from app.forms import LoginForm, RegistrationForm, VerifyUserForm, SubmitQuoteForm,ResetPasswordRequestForm,ResetPasswordForm
from app import app,db
from app.models import users,people_quoted, quotes,phrases
from app.email import send_password_reset_email
import pandas as pd
import time
import random
'''
This is the main routing script for the Flask app: it boots and configures the
server, handles incoming requests, and renders the HTML templates that are sent
to the browser.
Read more about Flask how-to here:
https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world
'''
# Helper functions
def clean_people_quoted_table():
'''
Clean up any existing quoted person for whom no quote can be found.
Works in place.
'''
all_quoted_people_id = [name.person_quoted_id for name in phrases.query.all()]
people_quoted_roster = people_quoted.query.all()
for person in people_quoted_roster:
if person.id not in all_quoted_people_id:
db.session.delete(person)
db.session.commit()
# load global variable for users who are authenticated.
def load_people():
'''
Load a "people_data" variable to be used in logged in. If user is not logged
in, returns None.
'''
if current_user.is_authenticated:
all_quoted_people_id = [name.primary_person_quoted_id for name in quotes.query.all()]
people_data = [people_quoted.query.filter_by(id = x).first_or_404().name for x in all_quoted_people_id]
people_data.sort()
people_data = set(people_data)
else:
people_data = None
return people_data
def verify_administrator(username):
'''
Quick function to verify a user and, if they fail, boot them back to the home page.
'''
user = users.query.filter_by(username=username).first_or_404()
is_admin = user.is_admin
if not is_admin:
flash("You are not an administrator. Quit trying to access the \
admin panel!")
return redirect(url_for('home'))
return is_admin
# ****** ROUTES ***********
@app.route('/')
def home():
people_data = load_people()
return render_template('home.html',people_data = people_data)
@app.route('/about/')
def about():
people_data = load_people()
return render_template('about.html',
title='About',
people_data = people_data)
@app.route('/login/',methods=['GET',"POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = users.query.filter_by(username=form.username.data).first()
if user is None:
flash("Huh. That username isn't in the database anywhere. Try a different one.")
return redirect(url_for('login'))
elif not user.check_password(form.password.data):
flash("Poop. That password doesn't match the username." )
return redirect(url_for('login'))
elif not user.check_is_verified():
flash("Looks like you haven't been verified yet. You'll need to be\
verified by a site administrator before you can fully log in.")
return redirect(url_for('login'))
else:
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('home')
return redirect(next_page)
people_data = load_people()
return render_template('login.html',
title='Sign In',
form=form,
people_data = people_data)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
user = users(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash("Your submission has been sent. But you'll have to have an \
administrator verify before you can log in.")
return redirect(url_for('login'))
return render_template('signup.html', title='Signup', form=form)
@app.route('/admin_verify/<username>', methods=['GET','POST'])
@login_required
def admin_verify(username):
if verify_administrator(username):
# pull all user data for verification
user_data_raw = users.query.all()
data = {'current_user':current_user,
'table':user_data_raw}
people_data = load_people()
return render_template('admin_verify.html',
title = 'Administrator Panel: Verify Users',
data = data,
people_data = people_data)
@app.route('/admin_manage/<username>', methods=['GET','POST'])
@login_required
def admin_manage(username):
if verify_administrator(username):
data = {}
# pull all quotes and their phrases for deletion if necessary.
quotes_data = quotes.query.all()
phrases_data = phrases.query.all()
people_quoted_data = people_quoted.query.all()
phrases_df = pd.DataFrame([[x.quote_id,x.phrase_text] for x in phrases_data])
phrases_df.columns = ['quote_id','phrases']
quotes_df = pd.DataFrame([[x.primary_person_quoted_id,x.id,x.submitted_datetime] for x in quotes_data])
quotes_df.columns = ['person_quoted','quote_id','submission_date']
people_quoted_df = pd.DataFrame([[x.id,x.name] for x in people_quoted_data])
people_quoted_df.columns = ['person_quoted','name']
quotes_df = quotes_df.merge(people_quoted_df,
on='person_quoted',
how='inner')
data['quotes'] = quotes_df.merge(phrases_df.groupby('quote_id').sum().reset_index(),
on='quote_id',
how='inner')
data['quotes']['submission_date'] = data['quotes']['submission_date'].dt.date
data['current_user'] = current_user
people_data = load_people()
return render_template('admin_manage.html',
title = 'Administrator Panel: Manage',
data = data,
people_data = people_data)
@app.route('/verify_user/<username>/<verify_username>', methods=['GET','POST'])
@login_required
def verify_user(username,verify_username):
if verify_administrator(username):
user_to_verify = users.query.filter_by(username=verify_username).first()
user_to_verify.is_verified = True
db.session.add(user_to_verify)
db.session.commit()
return redirect(url_for('admin_verify',username=username))
@app.route('/delete_quote/<username>/<quote_id>', methods=['GET','DELETE','POST'])
@login_required
def delete_quote(username,quote_id):
if verify_administrator(username):
del_quote = quotes.query.filter_by(id = quote_id).all()
del_phrases = phrases.query.filter_by(quote_id = quote_id).all()
for qt in del_quote:
for phr in del_phrases:
db.session.delete(phr)
db.session.delete(qt)
db.session.commit()
time.sleep(2)
# cleanup any existing quoted person if we can't find a quote of theirs.
clean_people_quoted_table()
return redirect(url_for('admin_manage',username=username))
@app.route('/submit', methods=['POST','GET'])
@login_required
def submit():
#create the forms and data to send to the page
submit_form = SubmitQuoteForm()
people = people_quoted.query.all()
people = [x.name for x in people]
people_list = list(set(str(x).lower() for x in people))
submitting_user = current_user.id
quoted_in_session = []
if submit_form.validate_on_submit():
#submit context with new quote object
if submit_form.context:
new_quote = quotes(submitted_by_id=submitting_user,
context = submit_form.context.data)
else:
new_quote = quotes(submitted_by_id=submitting_user)
# add phrases to quote object
for phrase in submit_form.phrases:
person = phrase.quoted_person_name.data
person_lower = person.lower()
# add new person_quoted if necessary
if person_lower not in people_list:
person_to_add = people_quoted(name=person)
db.session.add(person_to_add)
db.session.commit()
person_quoted_id = person_to_add.id
people_list.append(person_lower)
else:
person_quoted_id = people_quoted.query.filter_by(name=person).first_or_404().id
new_phrase = phrases(phrase_text = phrase.phrase_text.data,
person_quoted_id = person_quoted_id)
new_quote.phrases.append(new_phrase)
quoted_in_session.append(person_quoted_id)
# whoever spoke last is the primary person quoted
new_quote.primary_person_quoted_id = quoted_in_session[-1]
new_quote.date = submit_form.quote_date.data
new_quote.phonetic_date = str(pd.to_datetime(new_quote.date))
"""Tests for timeseries anomalies detection and imputation."""
import pandas as pd
import pudl.analysis.plant_parts_eia
GENS_MEGA = pd.DataFrame(
{
"plant_id_eia": [1, 1, 1, 1],
"report_date": ["2020-01-01", "2020-01-01", "2020-01-01", "2020-01-01"],
"utility_id_eia": [111, 111, 111, 111],
"generator_id": ["a", "b", "c", "d"],
"prime_mover_code": ["ST", "GT", "CT", "CA"],
"energy_source_code_1": ["BIT", "NG", "NG", "NG"],
"ownership": [
"total",
"total",
"total",
"total",
],
"operational_status_pudl": ["operating", "operating", "operating", "operating"],
"capacity_mw": [400, 50, 125, 75],
}
).astype({"report_date": "datetime64[ns]"})
def test_plant_ag():
"""Test aggregation of the plant-part part list by plant.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant
plant_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": [1],
"report_date": ["2020-01-01"],
"operational_status_pudl": ["operating"],
"utility_id_eia": [111],
"ownership": ["total"],
"capacity_mw": [650.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(plant_ag_out, plant_ag_expected)
def test_prime_fuel_ag():
"""Test aggregation of the plant-part part list by prime fuel.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant prime fuel
plant_primary_fuel_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_prime_fuel")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_primary_fuel_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"energy_source_code_1": ["BIT", "NG"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership": "total",
"capacity_mw": [400.0, 250.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(
plant_primary_fuel_ag_out, plant_primary_fuel_ag_expected
)
def test_prime_mover_ag():
"""Test aggregation of the plant-part part list by prime mover.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant prime mover
plant_prime_mover_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_prime_mover")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_prime_mover_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"prime_mover_code": ["CA", "CT", "GT", "ST"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership": "total",
"capacity_mw": [75.0, 125.0, 50.0, 400.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(
plant_prime_mover_ag_out, plant_prime_mover_ag_expected
)
def test_plant_gen_ag():
"""Test aggregation of the plant-part part list by generator.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant gen
plant_gen_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_gen")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_gen_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"generator_id": ["a", "b", "c", "d"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership": "total",
"capacity_mw": [400.0, 50.0, 125.0, 75.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(plant_gen_ag_out, plant_gen_ag_expected)
def test_make_mega_gen_tbl():
"""Test the creation of the mega generator table.
Integrates ownership with generators.
"""
# one plant with three generators
mcoe = pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c"],
"utility_id_eia": [111, 111, 111],
"unit_id_pudl": 1,
"prime_mover_code": ["CT", "CT", "CA"],
"technology_description": "Natural Gas Fired Combined Cycle",
"operational_status": "existing",
"retirement_date": pd.NA,
"capacity_mw": [50, 50, 100],
}
).astype(
{
"retirement_date": "datetime64[ns]",
"report_date": "datetime64[ns]",
}
)
# one record for every owner of each generator
df_own_eia860 = pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c", "c"],
"utility_id_eia": 111,
"owner_utility_id_eia": [111, 111, 111, 888],
"fraction_owned": [1, 1, 0.75, 0.25],
}
).astype({"report_date": "datetime64[ns]"})
out = pudl.analysis.plant_parts_eia.MakeMegaGenTbl().execute(
mcoe, df_own_eia860, slice_cols=["capacity_mw"]
)
out_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c", "c", "a", "b", "c", "c"],
"unit_id_pudl": 1,
"prime_mover_code": ["CT", "CT", "CA", "CA", "CT", "CT", "CA", "CA"],
"technology_description": "Natural Gas Fired Combined Cycle",
"operational_status": "existing",
"retirement_date": pd.NaT,
"capacity_mw": [50.0, 50.0, 75.0, 25.0, 50.0, 50.0, 100.0, 100.0],
"ferc_acct_name": "Other",
"operational_status_pudl": "operating",
"capacity_eoy_mw": [50, 50, 100, 100, 50, 50, 100, 100],
"fraction_owned": [1.00, 1.00, 0.75, 0.25, 1.00, 1.00, 1.00, 1.00],
"utility_id_eia": [111, 111, 111, 888, 111, 111, 111, 888],
"ownership": [
"owned",
"owned",
"owned",
"owned",
"total",
"total",
"total",
"total",
],
}
)
.astype(
{
"retirement_date": "datetime64[ns]",
"report_date": "datetime64[ns]",
"utility_id_eia": "Int64", # convert to pandas Int64 instead of numpy int64
}
)
.set_index([[0, 1, 2, 3, 0, 1, 2, 3]])
)
pd.testing.assert_frame_equal(out, out_expected)
def test_scale_by_ownership():
"""Test the scale_by_ownership method."""
dtypes = {"report_date": "datetime64[ns]", "utility_id_eia": pd.Int64Dtype()}
import os
import sys
import json
import copy
import numpy as np
import pandas as pd
import random
import tensorflow as tf
# import PIL
seed_value = 123
os.environ['PYTHONHASHSEED']=str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
tf.set_random_seed(seed_value)
from keras.utils import to_categorical
import keras.backend as k
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
k.set_session(tf.Session(config=config))
sys.path.append('/'.join(os.getcwd().split('/')))
from ornstein_auto_encoder import logging_daily
from ornstein_auto_encoder import configuration
from ornstein_auto_encoder import readers
from ornstein_auto_encoder import samplers
from ornstein_auto_encoder import build_network
from ornstein_auto_encoder.utils import argv_parse
if '1.15' in tf.__version__:
from ornstein_auto_encoder.fid_v1_15 import get_fid as _get_fid
else:
from ornstein_auto_encoder.fid import get_fid as _get_fid
from ornstein_auto_encoder.inception_score import get_inception_score as _get_inception_score
#####################################################################################################
def get_fid(images1, images2):
imgs1 = np.clip(255*((images1).transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
imgs2 = np.clip(255*((images2).transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
return _get_fid(imgs1, imgs2)
def get_is(images, size=100):
imgs = np.clip(255*(images.transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
return _get_inception_score(imgs, splits=1)[0]
if __name__=='__main__':
argdict = argv_parse(sys.argv)
logger = logging_daily.logging_daily(argdict['log_info'][0])
logger.reset_logging()
log = logger.get_logging()
log.setLevel(logging_daily.logging.INFO)
log.info('-----------------------------------------------------------------------------------')
log.info('Evaluate the performance measures for VGGFace2')
log.info('-----------------------------------------------------------------------------------')
model_path = argdict['model_path'][0].strip()
try:
model_aka = argdict['model_aka'][0].strip()
except:
model_aka = model_path.split('/')[-1]
feature_b = True
path_info_config = argdict['path_info'][0]
network_info_config = argdict['network_info'][0]
##############################################################################################
# Set hyper-parameter for testing
config_data = configuration.Configurator(path_info_config, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(network_info_config, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
##############################################################################################
# Reader
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, network_info, mode='train', verbose=True)
def get_numerics(model_path, model_aka,
path_info_config = "configurations/vggface2/psoae_path_info.cfg",
network_info_config = "configurations/vggface2/psoae_network_total_info.cfg",
unknown=False, feature_b=False):
# Set hyper-parameter for testing
config_data = configuration.Configurator(path_info_config, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(network_info_config, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
log.info('-----------------------------------------------------------------')
unknown = unknown
log.info('%s: unknown=%s' % (model_aka, unknown))
log.info('-----------------------------------------------------------------')
config_data = configuration.Configurator(argdict['path_info'][0], log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(argdict['network_info'][0], log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
# Set hyper-parameter for testing
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
### Bulid network ####################################################################################
log.info('-----------------------------------------------------------------')
network_class = getattr(build_network, ''.join(network_info['model_info']['network_class'].strip().split('FixedB')))
network = network_class(log, path_info, network_info, n_label=reader.get_n_label())
network.build_model('./%s/%s' % (model_path, path_info['model_info']['model_architecture']), verbose=0)
network.load(model_path)
log.info('-----------------------------------------------------------------')
# Load the held-out test indices saved during training
test_tot_idxs_path = os.path.join(model_path, path_info['model_info']['test_tot_idxs'])
test_idx = np.load(test_tot_idxs_path)
if unknown:
# Real test data sampler (subjects not seen during training)
new_network_info = copy.deepcopy(network_info)
new_path_info = copy.deepcopy(path_info)
new_reader = reader_class(log, new_path_info, new_network_info, mode='test', verbose=False)
real_test_idx = np.arange(new_reader.get_label().shape[0])
test_idx = real_test_idx
log.info('Construct test data sampler')
validation_sampler_class = getattr(samplers, network_info['validation_info']['sampler_class'].strip())
if unknown:
test_sampler = validation_sampler_class(log, test_idx, new_reader, network_info['validation_info'], verbose=False)
else:
test_sampler = validation_sampler_class(log, test_idx, reader, network_info['validation_info'], verbose=False)
tot_sharpness_original = []
tot_is_original = []
# tot_reconstruction = []
tot_gen_fid = []
tot_gen_is = []
tot_sharpness_gen = []
tot_one_shot_gen_fid = []
tot_one_shot_gen_is = []
tot_one_shot_sharpness_gen = []
for nrepeat in range(10):
log.info('-%d------------------------------------------------' % nrepeat)
nunit = 30
nobservations = 300
picked_y_class = np.random.choice(test_sampler.y_class, nunit, replace=False)
test_idxs = []
picked_one_shot_idxs = []
for yc in picked_y_class:
try: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations)
except ValueError: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations, replace=True)
test_idxs.append(chosen_observations)
picked_one_shot_idxs.append(np.random.choice(np.arange(nobservations), 1)[0])
test_idxs = np.array(test_idxs).flatten()
picked_one_shot_idxs = np.array(picked_one_shot_idxs)
x, y = test_sampler.reader.get_batch(test_idxs)
y_table = pd.Series(y)
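# A minimal sketch (assumed usage, not part of the original script): given the
# flattened ordering built above (nunit contiguous blocks of nobservations
# samples), the picked one-shot examples could be recovered from the batch as:
#   one_shot_pos = np.arange(nunit) * nobservations + picked_one_shot_idxs
#   one_shot_x = x[one_shot_pos]
#   one_shot_labels = y_table.iloc[one_shot_pos].values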
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 13:30:57 2020
@author: johnx
"""
FolderRoot = (r' ')
import os
import json
import pandas as pd
# Loop through the folder and any sub-folders
for subdir, dirs, files in os.walk(FolderRoot):
for filename in files:
Borisfile = []
filepath = subdir + os.sep + filename
# Check whether the file is a .boris project file
if filepath.endswith(".boris"):
# Open Boris file using json format
fileName = filename
infile = open(filepath,"r")
s = infile.read()
project = json.loads(s)
# Opens and extracts the names of the observations in the file
# (the .boris file has a hierarchical structure)
observationlist = project["observations"]
obkeys = observationlist.keys()
p = 0
for i in obkeys:
current = observationlist[i]
# Finds events within the observation and writes them to a list
Event_list = current["events"]
# Some event lists were empty, so they are filled with NaN placeholder rows
if not Event_list:
Event_list = [['nan','nan','nan','nan','nan'],['nan','nan','nan','nan','nan']]
# Data was converted from list to a data frame / table
newFrame = pd.DataFrame(Event_list)
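# A possible continuation (assumption, not from the original script): the
# five-element rows seen in the NaN filler above suggest columns along the
# lines of time / subject / behavior / modifier / status, so each frame could
# be labelled and collected per observation, e.g.:
#   newFrame.columns = ['time', 'subject', 'behavior', 'modifier', 'status']
#   newFrame['observation'] = i
#   Borisfile.append(newFrame)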
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from sklearn.metrics import classification_report, confusion_matrix
df = pd.read_csv("data/iris.csv")
df.head()
inputs_x = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
inputs_y = df['variety']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
inputs_x_scaler = scaler.fit_transform(inputs_x.values)
df_scaler = pd.DataFrame(inputs_x_scaler, index=inputs_x.index, columns=inputs_x.columns)
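# A possible next step (not in the original script): one-hot encode the string
# labels and hold out a test split before fitting a Keras classifier. The
# split size and random_state below are illustrative assumptions.
from sklearn.model_selection import train_test_split
inputs_y_onehot = pd.get_dummies(inputs_y)  # one indicator column per variety
x_train, x_test, y_train, y_test = train_test_split(df_scaler.values, inputs_y_onehot.values, test_size=0.2, random_state=0)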
import json
import os
import tarfile
import tempfile
import warnings
from itertools import zip_longest
from pathlib import Path
import pandas as pd
import woodwork as ww
from woodwork.exceptions import OutdatedSchemaWarning, UpgradeSchemaWarning
from woodwork.s3_utils import get_transport_params, use_smartopen
from woodwork.serialize import FORMATS, SCHEMA_VERSION
from woodwork.utils import _is_s3, _is_url, import_or_raise
def read_table_typing_information(path):
"""Read Woodwork typing information from disk, S3 path, or URL.
Args:
path (str): Location on disk, S3 path, or URL to read `woodwork_typing_info.json`.
Returns:
dict: Woodwork typing information dictionary
"""
path = os.path.abspath(path)
assert os.path.exists(path), '"{}" does not exist'.format(path)
file = os.path.join(path, "woodwork_typing_info.json")
with open(file, "r") as file:
typing_info = json.load(file)
typing_info["path"] = path
return typing_info
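# Illustrative usage (the directory path is a placeholder assumption):
#
#   typing_info = read_table_typing_information("./my_woodwork_table")
#   typing_info["schema_version"], typing_info["loading_info"]["type"]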
def _typing_information_to_woodwork_table(table_typing_info, validate, **kwargs):
"""Deserialize Woodwork table from table description.
Args:
table_typing_info (dict) : Woodwork typing information. Likely generated using :meth:`.serialize.typing_info_to_dict`
validate (bool): Whether parameter and data validation should occur during table initialization
kwargs (keywords): Additional keyword arguments to pass as keyword arguments to the underlying deserialization method.
Returns:
DataFrame: DataFrame with Woodwork typing information initialized.
"""
_check_schema_version(table_typing_info["schema_version"])
path = table_typing_info["path"]
loading_info = table_typing_info["loading_info"]
file = os.path.join(path, loading_info["location"])
load_format = loading_info["type"]
assert load_format in FORMATS
kwargs = loading_info.get("params", {})
table_type = loading_info.get("table_type", "pandas")
logical_types = {}
semantic_tags = {}
column_descriptions = {}
column_origins = {}
column_metadata = {}
use_standard_tags = {}
column_dtypes = {}
for col in table_typing_info["column_typing_info"]:
col_name = col["name"]
ltype_metadata = col["logical_type"]
ltype = ww.type_system.str_to_logical_type(
ltype_metadata["type"], params=ltype_metadata["parameters"]
)
tags = col["semantic_tags"]
if "index" in tags:
tags.remove("index")
elif "time_index" in tags:
tags.remove("time_index")
logical_types[col_name] = ltype
semantic_tags[col_name] = tags
column_descriptions[col_name] = col["description"]
column_origins[col_name] = col["origin"]
column_metadata[col_name] = col["metadata"]
use_standard_tags[col_name] = col["use_standard_tags"]
col_type = col["physical_type"]["type"]
if col_type == "category":
# Make sure categories are recreated properly
cat_values = col["physical_type"]["cat_values"]
cat_dtype = col["physical_type"]["cat_dtype"]
if table_type == "pandas":
cat_object = pd.CategoricalDtype(pd.Index(cat_values, dtype=cat_dtype))
else:
cat_object = pd.CategoricalDtype(pd.Series(cat_values))
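# Rebuilding the dtype this way keeps the original category values (and, in
# the pandas branch above, their original dtype) so the column can later be
# cast back faithfully, e.g. pd.Series(["a", "b"]).astype(cat_object).
# The example cast is illustrative only.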
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this, however, will raise as it cannot be sorted (on PY3 or with older
# numpy versions)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that inequality comparisons take the category order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# the unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# NaNs are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following: comparisons with scalars not in the categories should
# raise for ordering comparisons (<, >), but not for == / !=
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = | DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) | pandas.DataFrame |
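# --- Illustrative aside, not part of the test suite above ---
# The tests use the old Series.astype('category', categories=..., ordered=True)
# keyword form; later pandas versions express the same thing with an explicit
# CategoricalDtype. A minimal sketch, assuming a reasonably recent pandas:
import pandas as pd
from pandas.api.types import CategoricalDtype

_s = pd.Series(["a", "b", "c", "a"])
_dtype = CategoricalDtype(categories=list("abcdef"), ordered=True)
_cat = _s.astype(_dtype)      # ordered categorical with explicit categories
_codes = _cat.cat.codes       # small integer codes; -1 would mark missing values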
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements adapter for pmdarima forecasters to be used in sktime framework."""
__author__ = ["mloning", "hyang1996", "kejsitake", "fkiraly"]
__all__ = ["_PmdArimaAdapter"]
import pandas as pd
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
class _PmdArimaAdapter(BaseForecaster):
"""Base class for interfacing pmdarima."""
_tags = {
"ignores-exogeneous-X": False,
"capability:pred_int": True,
"requires-fh-in-fit": False,
"handles-missing-data": True,
}
def __init__(self):
self._forecaster = None
super(_PmdArimaAdapter, self).__init__()
def _instantiate_model(self):
raise NotImplementedError("abstract method")
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables, passed on to the pmdarima model if provided.
Returns
-------
self : returns an instance of self.
"""
self._forecaster = self._instantiate_model()
self._forecaster.fit(y, X=X)
return self
def _predict(self, fh, X=None):
"""Make forecasts.
Parameters
----------
fh : array-like
The forecaster's horizon with the steps ahead to predict.
Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
# distinguish between in-sample and out-of-sample prediction
fh_oos = fh.to_out_of_sample(self.cutoff)
fh_ins = fh.to_in_sample(self.cutoff)
# all values are out-of-sample
if fh.is_all_out_of_sample(self.cutoff):
return self._predict_fixed_cutoff(fh_oos, X=X)
# all values are in-sample
elif fh.is_all_in_sample(self.cutoff):
return self._predict_in_sample(fh_ins, X=X)
# both in-sample and out-of-sample values
else:
y_ins = self._predict_in_sample(fh_ins, X=X)
y_oos = self._predict_fixed_cutoff(fh_oos, X=X)
return y_ins.append(y_oos)
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Generate in sample predictions.
Parameters
----------
fh : array-like
The forecaster's horizon with the steps ahead to predict.
Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
if hasattr(self, "order"):
diff_order = self.order[1]
else:
diff_order = self._forecaster.model_.order[1]
# Initialize return objects
fh_abs = fh.to_absolute(self.cutoff).to_numpy()
fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
y_pred = | pd.Series(index=fh_abs) | pandas.Series |
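# --- Illustrative aside, not part of the adapter above ---
# _PmdArimaAdapter leaves _instantiate_model() abstract; a concrete subclass only
# needs to build the underlying pmdarima estimator. A minimal sketch, assuming
# pmdarima >= 1.8 (so that fit(y, X=...) matches the adapter's call above); the
# class name SimpleARIMA and the default order are illustrative, not part of sktime:
from pmdarima.arima import ARIMA

class SimpleARIMA(_PmdArimaAdapter):
    """Fixed-order ARIMA forecaster built on the adapter above (sketch only)."""

    def __init__(self, order=(1, 0, 0)):
        self.order = order
        super(SimpleARIMA, self).__init__()

    def _instantiate_model(self):
        # pmdarima's ARIMA provides the fit/predict interface the adapter relies on
        return ARIMA(order=self.order, suppress_warnings=True)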
# %%%%
import pandas as pd
import numpy as np
import re
# %%%% functions
## Fill missing values
def fillmissing(x,col,index,benchmark):
for i in range(index,len(x)):
# find missing value
if x.loc[i,col] == benchmark:
# if first is missing, fill using the value next to it
if i == index:
x.loc[i,col] = x.loc[i+1,col]
# if the last value is missing, fill using the value that precedes it
elif i == len(x)-1:
x.loc[i,col] = x.loc[i-1,col]
# otherwise, fill using the average of the nearest non-null values before and after
else:
j = i-1
k = i+1
while x.loc[j,col] == benchmark:
j -= 1
while x.loc[k,col] == benchmark:
k += 1
x.loc[i,col] = np.mean([x.loc[j,col],x.loc[k,col]])
return x
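# --- Illustrative usage sketch with assumed data (not part of the original script) ---
# fillmissing() treats `benchmark` (0 here) as the missing-value marker and fills each
# gap from the nearest valid neighbours:
_demo = pd.DataFrame({'Close': [10.0, 0.0, 14.0, 0.0]})
_demo = fillmissing(_demo, 'Close', 0, 0)
# _demo['Close'] is now [10.0, 12.0, 14.0, 14.0]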
## Data Preprocess
def preprocess(x,name,Date,column,index,benchmark,q):
# select the valid starting day
x = x[x['Date'] > Date].copy()
x = x.reset_index().copy()
x = x.drop('index',axis = 1).copy()
# fill na with benchmark we chose
x[column] = x[column].fillna(benchmark).copy()
# fill missing values
x = fillmissing(x,column,index,benchmark).copy()
# calculate daily return
x['lag_'+column] = x[column].shift(1)
x = x.iloc[1:,:].copy().reset_index()
x = x.drop('index',axis = 1).copy()
x['log_ret'] = np.log(x[column])-np.log(x['lag_'+column])
retm = np.mean(x['log_ret'])
x['retv'] = np.square(x['log_ret']-retm)*100
# estimate volatility
x[name+'_20day_vol'] = np.sqrt(x['retv'].rolling(window=20,win_type="boxcar").mean())/10
# estimate quantiles of the distribution of log-returns
x[name+'_quant_ret'] = np.nan
for r in range(len(x)-20):
R_quant = np.quantile(x['log_ret'][r:r+20],q)
x.loc[r+19,name+'_quant_ret'] = R_quant
return x
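# --- Illustrative aside with synthetic data (mirrors the volatility/quantile logic above) ---
# The same 20-day rolling volatility and 2.5% log-return quantile, computed directly
# on a plain Series with the scaling used in preprocess():
_r = pd.Series(np.random.normal(0, 0.01, 100))               # synthetic daily log-returns
_vol20 = np.sqrt(((_r - _r.mean()) ** 2 * 100).rolling(20).mean()) / 10
_q025 = _r.rolling(20).quantile(0.025)                        # rolling 2.5% quantile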
# %%%% Main Dataset: csi300
csi = pd.read_csv('/Users/msstark/Desktop/project/Shanghai Shenzhen CSI 300 Historical Data.csv')
# setting date format
csi['Date'] = csi['Date'].apply(lambda x: re.sub(r',',r'',x))
csi['Day'] = csi['Date'].apply(lambda x: x.split(' ')[1]).astype(int)
csi['Month'] = csi['Date'].apply(lambda x: x.split(' ')[0])
csi['Month'].unique()
csi['Month'] = csi['Month'].map({'Jan':1,'Feb':2,'Mar':3,'Apr':4,'May':5,'Jun':6,
'Jul':7,'Aug':8,'Sep':9,'Oct':10,'Nov':11,'Dec':12})
csi['Year'] = csi['Date'].apply(lambda x: x.split(' ')[2]).astype(int)
csi['Date'] = csi['Year'].astype(str) +'-'+csi['Month'].astype(str)+'-'+csi['Day'].astype(str)
csi['Date'] = pd.to_datetime(csi['Date'], format='%Y-%m-%d')
csi = csi.rename(columns = {'Price':'Close'}).copy()
# convert object type to float
col = ['Close','Open','High','Low']
for c in col:
csi[c] = csi[c].apply(lambda x: re.sub(r',',r'',x)).astype('float')
csi['log_dsprd'] = np.log(csi['High'] - csi['Low'])
csi.columns
# apply preprocess function
csi = preprocess(csi,'csi','2005-01-03','Close',0,0,0.025).copy()
# %%%% spot exchange rate
xr = pd.read_csv('/Users/msstark/Desktop/project/DEXCHUS.csv')
# setting date format
xr['DATE'] = pd.to_datetime(xr['DATE'], format='%Y-%m-%d')
xr = xr.rename(columns = {'DATE':'Date','DEXCHUS':'exR'}).copy()
# we find there's '.' inside our dataset
# replace '.' with '0', which is also the benchmark we chose to fill the missing values
xr['exR'] = xr[['exR']].apply(lambda x: x.replace('.','0'))
# convert object type to float
xr['exR'] = xr['exR'].astype(float)
# apply preprocess function
xr = preprocess(xr,'exR','2005-01-03','exR',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(xr[['Date','exR_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% hsi
hsi = pd.read_csv('^HSI.csv')
# setting date format
hsi['Date'] = pd.to_datetime(hsi['Date'], format='%Y-%m-%d')
# apply preprocess function
hsi = preprocess(hsi,'hsi','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(hsi[['Date','hsi_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% sse
sse = pd.read_csv('SSE Composite Index.csv')
# setting date format
sse['Date'] = pd.to_datetime(sse['Date'], format='%Y-%m-%d')
# apply preprocess function
sse = preprocess(sse,'sse','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(sse[['Date','sse_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% commodities
# corn
corn = pd.read_csv('corn-prices-historical-chart-data.csv')
corn = corn.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
corn['Date'] = pd.to_datetime(corn['Date'], format='%Y-%m-%d')
# apply preprocess function
corn = preprocess(corn,'corn','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(corn[['Date','corn_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# soybean
soybean = pd.read_csv('soybean-prices-historical-chart-data.csv')
soybean = soybean.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
soybean['Date'] = pd.to_datetime(soybean['Date'], format='%Y-%m-%d')
# apply preprocess function
soybean = preprocess(soybean,'soybean','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(soybean[['Date','soybean_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% heating oil
heat = pd.read_csv('New_York_Harbor_No._2_Heating_Oil_Spot_Price_FOB.csv')
heat = heat.rename(columns = {'Day':'Date','New York Harbor No. 2 Heating Oil Spot Price FOB Dollars per Gallon':'Close'})
# setting date format
heat['Date'] = heat['Date'].apply(lambda x: re.sub(r'\/',r'-',x))
heat['Date'] = pd.to_datetime(heat['Date'], format='%m-%d-%Y')
heat = heat.sort_values(by=['Date'],ascending=True).reset_index().copy()
heat = heat.drop('index',axis = 1).copy()
# apply preprocess function
heat = preprocess(heat,'heat','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(heat[['Date','heat_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% 10-year bond yield rate
bond = | pd.read_csv('China 10-Year Bond Yield Historical Data.csv') | pandas.read_csv |
from __future__ import print_function, division
from _cython_feature_detectors import *
import numpy as np
import pandas as pd
import copy
from scipy import stats
import scipy.optimize
import matplotlib.dates as mdates
import math, datetime
from slicedpy.normal import Normal
from slicedpy.powerstate import PowerState, PowerSegment
from slicedpy.datastore import DataStore
from slicedpy import utils
from pda.channel import _indicies_of_periods
"""
.. module:: feature_detectors
:synopsis: Functions for detecting features in power data.
This file implements feature detectors which are written in pure
Python. Cython feature detectors are in
cython/_cython_feature_detectors.pyx. This file also holds helper functions
for pre-processing prior to using feature detectors.
"""
###############################################################################
# SPIKE HISTOGRAM FUNCTIONS
###############################################################################
def get_merged_spikes(fdiff):
"""Merge consecutive forward difference values of the same sign.
Args:
fdiff (1D np.ndarray): forward difference of power.
e.g. calculated by np.diff
Returns:
merged_fdiff (1D np.ndarray). Will be zero prior to each merged spike.
"""
sign_comparison = (fdiff[:-1] * fdiff[1:]) > 0
merged_fdiff = copy.copy(fdiff)
accumulator = 0
for i in range(0,merged_fdiff.size-1):
if sign_comparison[i] == True:
if accumulator == 0:
accumulator = fdiff[i] + fdiff[i+1]
else:
accumulator += fdiff[i+1]
merged_fdiff[i] = 0
else:
if accumulator != 0:
merged_fdiff[i] = accumulator
accumulator = 0
# Handle last element if necessary
if accumulator != 0:
merged_fdiff[-1] = accumulator
return merged_fdiff
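# --- Illustrative usage sketch with an assumed input (not part of the original module) ---
# Two consecutive rises of +50 and +30 merge into a single +80 spike placed at the end
# of the run, with the earlier position zeroed; the sign change keeps the -60 fall separate:
_merged = get_merged_spikes(np.array([0., 50., 30., -60.]))
# _merged == array([  0.,   0.,  80., -60.])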
def get_merged_spikes_pandas(series):
return pd.Series(get_merged_spikes(series.values), index=series.index)
def spike_histogram(series, merge_spikes=True, window_duration=60, n_bins=8):
"""
Args:
* series (pd.Series): watts
* merge_spikes (bool): Default = True
* window_duration (float): Width of each window in seconds
* n_bins (int): number of bins per window.
Returns:
spike_hist, bin_edges:
spike_hist (pd.DataFrame):
index is pd.DateTimeIndex of start of each time window
columns are 2-tuples of the bin edges in watts (int)
bin_edges (list of ints):
"""
fdiff = series.diff()
if merge_spikes:
fdiff = get_merged_spikes_pandas(fdiff)
abs_fdiff = np.fabs(fdiff)
freq = (window_duration, 'S')
date_range, boundaries = _indicies_of_periods(fdiff.index,
freq=freq)
bin_edges = np.concatenate(([0], np.exp(np.arange(1,n_bins+1))))
bin_edges = np.round(bin_edges).astype(int)
cols = zip(bin_edges[:-1], bin_edges[1:])
spike_hist = | pd.DataFrame(index=date_range, columns=cols) | pandas.DataFrame |
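# --- Illustrative aside (not part of the original module) ---
# The spike-histogram columns come from exponentially spaced bin edges in watts;
# for the default n_bins=8 this gives:
_edges = np.round(np.concatenate(([0], np.exp(np.arange(1, 9))))).astype(int)
# _edges == array([   0,    3,    7,   20,   55,  148,  403, 1097, 2981])
_bin_cols = list(zip(_edges[:-1], _edges[1:]))   # [(0, 3), (3, 7), ..., (1097, 2981)]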
"""
lipydomics/interactive.py
<NAME> and <NAME>
description:
"""
from pandas import DataFrame, concat, read_csv
import csv
import numpy as np
import os
from lipydomics.data import Dataset
from lipydomics.stats import add_anova_p, add_pca3, add_plsda, add_2group_corr, add_plsra, add_log2fc
from lipydomics.plotting import (
barplot_feature_bygroup, batch_barplot_feature_bygroup, scatter_pca3_projections_bygroup,
scatter_plsda_projections_bygroup, splot_plsda_pcorr_bygroup, scatter_plsra_projections_bygroup,
heatmap_lipid_class_log2fc
)
from lipydomics.identification import add_feature_ids, remove_potential_nonlipids
from lipydomics.identification.rt_calibration import get_ref_rt, RTCalibration
from lipydomics.util import filter_d, parse_lipid
def load_dset():
"""
load_dset
description:
Prompts the user with options to load a lipydomics Dataset instance, either a new one from a .csv file or an
existing one from .pickle file. Returns the loaded instance or None on any sort of failure. There is also a
hidden exit option that will completely stop execution.
(if None is returned, this function is called again until a Dataset instance is returned)
(if 'exit' is returned, main() will return None rather than actually exiting)
returns:
(lipydomics.data.Dataset or None or 'exit') -- lipidomics dataset instance
"""
dset = None
print('\nWhat would you like to do?')
print('\t1. Make a new Dataset')
print('\t2. Load a previous Dataset')
option = input('> ')
if option == '1':
print('Please enter the path to the csv file you want to work with.')
csv_fname = input('> ')
# validate the csv file exists
if not os.path.isfile(csv_fname):
print('! ERROR: Make sure the path specified is correct and the file exists.')
return None
# prompt for positive or negative ESI mode
print('What ESI mode was used for this data? (pos/neg)')
esi = input('> ')
if esi not in ['pos', 'neg']:
print('! ERROR: ESI mode "{}" not recognized'.format(esi))
return None
# load the Dataset from .csv file
dset = Dataset(csv_fname, esi_mode=esi)
print('! INFO: Loaded a new Dataset from .csv file: "{}"'.format(csv_fname))
# try to automatically assign headers
print('Would you like to automatically assign groups from headers? (y/N)')
ans = input('> ')
if ans == 'y':
try:
with open(csv_fname, newline='') as f:
reader = csv.reader(f)
header = next(reader)
header = header[3:]
group_map = {}
for i in range(len(header)):
if header[i] not in group_map:
group_map[header[i]] = [i]
else:
group_map[header[i]] = group_map[header[i]] + [i]
dset.assign_groups(group_map)
print('! INFO: Automatically assigned groups from headers')
except Exception as e:
print('! ERROR:', e)
print('! ERROR: Unable to automatically assign groups from headers')
# reload the Dataset just in case
dset = Dataset(csv_fname)
elif option == '2':
print('Please enter the path to the pickle file you want to load.')
pickle_fname = input('> ')
if not os.path.isfile(pickle_fname):
print('! ERROR: Make sure the path specified is correct and the file exists.')
return None
dset = Dataset.load_bin(pickle_fname)
print('! INFO: Loaded existing Dataset from .pickle file: "{}"'.format(pickle_fname))
# exit option not listed
elif option == 'exit':
return 'exit'
return dset
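# --- Illustrative aside (assumed helper, not part of the original module) ---
# load_dset() returns None on any failure and the sentinel string 'exit' to stop,
# so a caller typically retries until something comes back:
def _prompt_for_dataset():
    dset = None
    while dset is None:
        dset = load_dset()
    return None if dset == 'exit' else dset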
def manage_groups(dset):
"""
manage_groups
description:
Prompts the user with options to manage group assignments on the Dataset instance:
- Assign indices to a group
- View assigned group indices
- Get data by group
Returns a boolean indicating whether the user is finished with assigning groups.
(this function gets called again if False is returned)
parameters:
dset (lipydomics.data.Dataset) -- lipidomics dataset instance
returns:
(bool) -- finished managing groups
"""
print('Managing groups... What would you like to do?')
print("\t1. Assign group")
print("\t2. View assigned groups")
print("\t3. Get data by group(s)")
print('\t"back" to go back')
option = input('> ')
if option == "1":
print("Please provide a name for a group and its indices in order of name > starting index > ending index."
"\n\t* group name should not contain spaces\n\t* indices start at 0\n\t* example: 'A 1 3'")
group = input('> ')
group = group.split()
name = group[0]
indices = [_ for _ in range(int(group[1]), int(group[2]) + 1)]
try:
dset.assign_groups({name: indices})
print('! INFO: Assigned indices: {} to group: "{}"'.format(dset.group_indices[name], name))
except ValueError as ve:
print('! ERROR:', ve)
print("! ERROR: Failed to assign group, please check your formatting and try again")
elif option == "2":
for group in dset.group_indices:
print('\t"{}": {}'.format(group, dset.group_indices[group]))
return False
elif option == "3":
print("Which group would you like to view?")
name = input('> ')
print(dset.get_data_bygroup(name))
return False
elif option == 'back':
return True
else:
print('! ERROR: unrecognized option: "{}"'.format(option))
return False
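# --- Editor's note: illustrative sketch, not part of the original module. ---
# Option 1 above parses input such as 'A 1 3' into a group name and an inclusive
# index range before calling Dataset.assign_groups. The hypothetical helper below
# shows that parsing step on its own: 'A 1 3' -> {'A': [1, 2, 3]}.
def _example_parse_group(text):
    name, start, end = text.split()
    return {name: list(range(int(start), int(end) + 1))}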
def filter_data(dset):
"""
filter_data
description:
Prompts the user with options for filtering the data:
            - The user provides ranges for m/z, RT and CCS values and can download
              a csv file containing all data matching those ranges.
- Can also take multiple ranges given a CSV file of ranges.
parameters:
dset (lipydomics.data.Dataset) -- lipidomics dataset instance
returns:
(bool) -- finished filtering data
"""
print('Filtering data... What would you like to do?')
print("\t1. Single query")
print("\t2. Batch query")
print("\t3. S-Plot filtering")
print('\t"back" to go back')
option = input('> ')
if option == "back":
return True
label_dat = dset.labels
label_df = DataFrame(label_dat)
if option == "1":
print("Please Provide m/z and tolerance (Ex. '150 1' <--- This would be 150 plus or minus 1)")
mz = input('> ')
print("Please Provide Retention Time and tolerance (Ex. '1 1' <--- This would be 1 plus or minus 1)")
rt = input('> ')
print("Please Provide CCS and tolerance (Ex. '150 3' <--- This would be 150 plus or minus 3%)")
ccs = input('> ')
print("Which group would you like to choose? ('All' to select the whole data)")
group = input('> ')
try:
if group == "All":
cur_data = DataFrame(dset.intensities)
else:
cur_data = dset.get_data_bygroup(group)
int_df = DataFrame(cur_data)
cur_df = concat([label_df, int_df], axis=1, ignore_index=True, sort=False)
mzs = [float(_) for _ in mz.split()]
rts = [float(_) for _ in rt.split()]
ccss = [float(_) for _ in ccs.split()]
# convert CCS tolerance from percentage to an absolute value
ccss[1] = ccss[1] / 100. * ccss[0]
filtered = filter_d(mzs, rts, ccss, cur_df)
except ValueError as ve:
print('! ERROR:', ve)
print("! ERROR: Failed to filter data, please check your groups and try again")
return False
elif option == "2":
print("Please provide the path of the file with batch-query information")
path = input('> ')
try:
query = read_csv(path)
except Exception as e:
print('! ERROR:', e)
print("! ERROR: Failed to load the file. Please make sure the file exists at the right path.")
return False
print("Which group would you like to choose? ('All' to select the whole data)")
group = input('> ')
try:
if group == "All":
cur_data = | DataFrame(dset.intensities) | pandas.DataFrame |
"""
Rank summarization results.
"""
import os
import sys
import time
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from scipy.stats import sem
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from experiments import util as exp_util
from postprocess import util as pp_util
from config import rank_args
from rank.remove import get_mean_df
from rank.remove import plot_mean_df
def process(args, exp_hash, out_dir, logger):
begin = time.time()
color, line, label = pp_util.get_plot_dicts()
df_loss_list = []
df_li_loss_list = []
df_acc_list = []
df_li_acc_list = []
df_auc_list = []
df_li_auc_list = []
df_loss_rel_list = []
df_acc_rel_list = []
df_auc_rel_list = []
for tree_type in args.tree_type:
in_dir = os.path.join(args.in_dir,
tree_type,
f'exp_{exp_hash}',
'summary')
for ckpt in args.ckpt:
ckpt_dir = os.path.join(in_dir, f'ckpt_{ckpt}')
# rankings
fp_loss = os.path.join(ckpt_dir, 'loss_rank.csv')
fp_li_loss = os.path.join(ckpt_dir, 'loss_rank_li.csv')
fp_acc = os.path.join(ckpt_dir, 'acc_rank.csv')
fp_li_acc = os.path.join(ckpt_dir, 'acc_rank_li.csv')
fp_auc = os.path.join(ckpt_dir, 'auc_rank.csv')
fp_li_auc = os.path.join(ckpt_dir, 'auc_rank_li.csv')
assert os.path.exists(fp_loss), f'{fp_loss} does not exist!'
assert os.path.exists(fp_li_loss), f'{fp_li_loss} does not exist!'
assert os.path.exists(fp_acc), f'{fp_acc} does not exist!'
assert os.path.exists(fp_li_acc), f'{fp_li_acc} does not exist!'
assert os.path.exists(fp_auc), f'{fp_auc} does not exist!'
            assert os.path.exists(fp_li_auc), f'{fp_li_auc} does not exist!'
df_loss_list.append(pd.read_csv(fp_loss))
df_li_loss_list.append(pd.read_csv(fp_li_loss))
df_acc_list.append(pd.read_csv(fp_acc))
df_li_acc_list.append(pd.read_csv(fp_li_acc))
df_auc_list.append(pd.read_csv(fp_auc))
df_li_auc_list.append(pd.read_csv(fp_li_auc))
# relative performance
fp_loss_rel = os.path.join(ckpt_dir, 'loss_rel.csv')
fp_acc_rel = os.path.join(ckpt_dir, 'acc_rel.csv')
fp_auc_rel = os.path.join(ckpt_dir, 'auc_rel.csv')
assert os.path.exists(fp_loss_rel), f'{fp_loss_rel} does not exist!'
assert os.path.exists(fp_acc_rel), f'{fp_acc_rel} does not exist!'
assert os.path.exists(fp_auc_rel), f'{fp_auc_rel} does not exist!'
df_loss_rel_list.append(pd.read_csv(fp_loss_rel))
df_acc_rel_list.append(pd.read_csv(fp_acc_rel))
df_auc_rel_list.append(pd.read_csv(fp_auc_rel))
# compile results
df_loss_all = pd.concat(df_loss_list)
df_li_loss_all = pd.concat(df_li_loss_list)
df_acc_all = pd.concat(df_acc_list)
df_li_acc_all = pd.concat(df_li_acc_list)
df_auc_all = pd.concat(df_auc_list)
df_li_auc_all = | pd.concat(df_li_auc_list) | pandas.concat |
#!/usr/bin/env python
import argparse
from collections import defaultdict
import logging, shutil
import os
from subprocess import Popen
import re
from datetime import datetime
import sys
import pandas as pd
import numpy as np
from sistr.misc.reduce_to_centroid_alleles import run_allele_reduction
from sistr.sistr_cmd import genome_name_from_fasta_path
from sistr.src.blast_wrapper import BlastRunner
from sistr.src.logger import init_console_logger
from sistr.src.parsers import parse_fasta
from sistr.src.serovar_prediction import SerovarPredictor, overall_serovar_call
from sistr.src.cgmlst import CGMLST_PROFILES_PATH, run_cgmlst, allele_name, CGMLST_FULL_FASTA_PATH
from sistr.src.serovar_prediction.constants import GENOMES_TO_SEROVAR_PATH, GENOMES_TO_SPP_PATH, SEROVAR_TABLE_PATH
from sistr.src.mash import MASH_SKETCH_FILE
def init_parser():
prog_desc = '''Add reference genomes to sistr_cmd
    Supply genome FASTA files and a table mapping genome name to serovar (and subspecies). If a genome is not present in the table, or no table is supplied, the serovar and subspecies predictions will be used instead.
sistr_cmd ref genome info files will be written to an output directory
'''
parser = argparse.ArgumentParser(prog='predict_serovar',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=prog_desc)
parser.add_argument('fastas',
metavar='F',
nargs='+',
help='Input genome FASTA file(s). Genome names in filenames before file extension (e.g. for "g1.fasta" genome name is "g1")')
parser.add_argument('-o',
'--outdir',
required=True,
help='Output destination')
parser.add_argument('-s',
'--serovar-table',
help='Table with serovar (and subspecies). CSV expected if extension is .csv; otherwise tab delimited expected. Columns=[genome,serovar, subspecies(optional)]')
parser.add_argument('--force',
action='store_true',
help='Force overwrite of output directory if it exists!')
parser.add_argument('-T',
'--tmp-dir',
default='/tmp',
help='Base temporary working directory for intermediate analysis files.')
parser.add_argument('-t', '--threads',
type=int,
default=1,
help='Number of parallel threads to run sistr_cmd analysis.')
parser.add_argument('-v',
'--verbose',
action='count',
default=2,
help='Logging verbosity level (-v to show warnings; -vvv to show debug info)')
return parser
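# --- Editor's note: illustrative sketch, not part of the original script. ---
# Based on the parser above, a typical command line would be parsed roughly as
# follows; the FASTA and CSV file names are placeholders, not files shipped with
# sistr_cmd.
#   args = init_parser().parse_args(
#       ['g1.fasta', 'g2.fasta', '-o', 'out_dir', '-s', 'serovars.csv', '-t', '4'])
#   # args.fastas == ['g1.fasta', 'g2.fasta'], args.outdir == 'out_dir',
#   # args.serovar_table == 'serovars.csv', args.threads == 4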
def sketch_fasta(fasta_path, outdir):
"""Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path
"""
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path
def merge_sketches(outdir, sketch_paths):
"""Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches
"""
merge_sketch_path = os.path.join(outdir, 'sistr.msh')
args = ['mash', 'paste', merge_sketch_path]
for x in sketch_paths:
args.append(x)
args.append(MASH_SKETCH_FILE)
logging.info('Running Mash paste with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
return merge_sketch_path
def create_subdirs(outdir, *args):
subdir = os.path.join(outdir, *args)
try:
os.makedirs(subdir)
return subdir
except Exception as ex:
if os.path.exists(subdir):
return subdir
logging.error(ex)
def merge_cgmlst_prediction(serovar_prediction, cgmlst_prediction):
serovar_prediction.cgmlst_distance = cgmlst_prediction['distance']
serovar_prediction.cgmlst_genome_match = cgmlst_prediction['genome_match']
serovar_prediction.serovar_cgmlst = cgmlst_prediction['serovar']
serovar_prediction.cgmlst_matching_alleles = cgmlst_prediction['matching_alleles']
serovar_prediction.cgmlst_subspecies = cgmlst_prediction['subspecies']
return serovar_prediction
def run_sistr(input_fasta, tmp_dir):
blast_runner = None
try:
assert os.path.exists(input_fasta), "Input fasta file '%s' must exist!" % input_fasta
fasta_filename = os.path.basename(input_fasta)
genome_name = genome_name_from_fasta_path(input_fasta)
dtnow = datetime.now()
genome_tmp_dir = os.path.join(tmp_dir, dtnow.strftime("%Y%m%d%H%M%S") + '-' + 'SISTR' + '-' + genome_name)
blast_runner = BlastRunner(input_fasta, genome_tmp_dir)
logging.info('Initializing temporary analysis directory "%s" and preparing for BLAST searching.',
genome_tmp_dir)
blast_runner.prep_blast()
logging.info('Temporary FASTA file copied to %s', blast_runner.tmp_fasta_path)
cgmlst_prediction, cgmlst_results = run_cgmlst(blast_runner)
spp = cgmlst_prediction['subspecies']
serovar_predictor = SerovarPredictor(blast_runner, spp)
serovar_predictor.predict_serovar_from_antigen_blast()
prediction = serovar_predictor.get_serovar_prediction()
merge_cgmlst_prediction(prediction, cgmlst_prediction)
overall_serovar_call(prediction, serovar_predictor)
logging.info('%s | Antigen gene BLAST serovar prediction: "%s" serogroup=%s:H1=%s:H2=%s',
fasta_filename,
prediction.serovar_antigen,
prediction.serogroup,
prediction.h1,
prediction.h2)
logging.info('%s | Subspecies prediction: %s',
fasta_filename,
spp)
logging.info('%s | Overall serovar prediction: %s',
fasta_filename,
prediction.serovar)
finally:
logging.info('Deleting temporary working directory at %s', blast_runner.tmp_work_dir)
blast_runner.cleanup()
return prediction, cgmlst_results
def cgmlst_profiles_df(fastas, cgmlst_results):
genome_marker_cgmlst_result = {}
for fasta, res in zip(fastas, cgmlst_results):
genome = genome_name_from_fasta_path(fasta)
tmp = {}
for marker, res_dict in res.items():
aname = res_dict['name']
tmp[marker] = int(aname) if aname is not None else None
genome_marker_cgmlst_result[genome] = tmp
return pd.DataFrame(genome_marker_cgmlst_result).transpose()
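# --- Editor's note: illustrative sketch, not part of the original module. ---
# cgmlst_profiles_df expects one result dict per FASTA, keyed by marker, with a
# 'name' entry holding the allele name; the returned frame has genomes as rows
# and markers as columns. The toy inputs below are made up, and the example
# assumes genome_name_from_fasta_path('/tmp/genomeA.fasta') yields 'genomeA'.
def _example_profiles_df():
    fastas = ['/tmp/genomeA.fasta']
    results = [{'marker_1': {'name': '42'}, 'marker_2': {'name': None}}]
    df = cgmlst_profiles_df(fastas, results)
    # df.loc['genomeA', 'marker_1'] -> 42; df.loc['genomeA', 'marker_2'] -> None/NaN
    return df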
def write_cgmlst_fasta(outdir, cgmlst_results):
marker_allele_seqs = defaultdict(set)
allowed_nts = set('ATGCatgc')
for h, s in parse_fasta(CGMLST_FULL_FASTA_PATH):
marker, allele = h.split('|')
s = s.replace('-', '')
forbidden_char = set(s) - allowed_nts
if len(forbidden_char) > 0:
logging.warning('Forbidden nucleotide characters %s in allele "%s". Skipping this allele!',
forbidden_char,
h)
continue
marker_allele_seqs[marker].add(s)
# init default dict with int where values start as int 0
new_allele_count = defaultdict(int)
for x in cgmlst_results:
for marker, res in x.items():
seq = res['seq']
if seq is not None:
if seq not in marker_allele_seqs[marker]:
new_allele_count[marker] += 1
if '-' in seq:
logging.error('marker %s | result %s', marker, res)
marker_allele_seqs[marker].add(seq)
for marker in sorted(new_allele_count.keys()):
logging.info('Added %s new alleles for marker %s', new_allele_count[marker], marker)
new_cgmlst_fasta_path = os.path.join(outdir, 'cgmlst-full.fasta')
with open(new_cgmlst_fasta_path, 'w') as fout:
for marker in sorted(marker_allele_seqs.keys()):
seqs = marker_allele_seqs[marker]
for seq in seqs:
fout.write('>{}|{}\n{}\n'.format(marker, allele_name(seq), seq))
logging.info('cgMLST FASTA written to "%s" with %s novel alleles',
new_cgmlst_fasta_path,
sum([v for k, v in new_allele_count.items()]))
return new_cgmlst_fasta_path
def write_cgmlst_profiles_csv(outdir, cgmlst_results, genome_names):
df_profiles_old = pd.read_csv(CGMLST_PROFILES_PATH, index_col=0)
markers = df_profiles_old.columns
genome_marker_allele_results = defaultdict(dict)
for genome, cgmlst_result in zip(genome_names, cgmlst_results):
for marker in markers:
allele = None
if marker in cgmlst_result:
r = cgmlst_result[marker]
if 'name' in r:
allele = int(r['name']) if r['name'] is not None else None
else:
allele = None
genome_marker_allele_results[genome][marker] = allele
df_profiles_new = pd.DataFrame(genome_marker_allele_results).transpose()
df_all_profiles = pd.concat([df_profiles_new, df_profiles_old])
profiles_output_path = os.path.join(outdir, 'cgmlst-profiles.csv')
df_all_profiles.to_csv(profiles_output_path, float_format='%.0f')
assert os.path.exists(profiles_output_path), 'cgMLST profiles CSV file was not written to "{}"'.format(
profiles_output_path)
logging.info('cgMLST profiles (dim=%s) CSV written to "%s"',
df_all_profiles.shape,
profiles_output_path)
def read_genomes_to_x(path):
out = {}
with open(path) as f:
for l in f:
l = l.strip()
g, s = l.split('\t')
out[g] = s
return out
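# --- Editor's note: illustrative sketch, not part of the original module. ---
# read_genomes_to_x expects a two-column, tab-delimited file mapping genome names
# to a value (serovar or subspecies), one pair per line, for example:
#   genomeA<TAB>Enteritidis
#   genomeB<TAB>Typhimurium
# write_genomes_to_x_table (below) writes the same format back out.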
def write_genomes_to_x_table(path, genome_to_x):
with open(path, 'w') as fout:
for k, v in genome_to_x.items():
fout.write('{}\t{}\n'.format(k, v))
def write_serovar_and_spp_tables(outdir, df_serovar, predictions, genome_names):
genome_serovar = read_genomes_to_x(GENOMES_TO_SEROVAR_PATH)
genome_spp = read_genomes_to_x(GENOMES_TO_SPP_PATH)
# prediction serovars and subspecies
pred_genome_serovar = {}
pred_genome_spp = {}
for genome, prediction in zip(genome_names, predictions):
pred_dict = prediction.__dict__
pred_genome_serovar[genome] = pred_dict['serovar']
if 'cgmlst_subspecies' in pred_dict:
pred_genome_spp[genome] = pred_dict['cgmlst_subspecies']
else:
pred_genome_spp[genome] = None
if df_serovar is not None:
for i, row in df_serovar.iterrows():
genome = row['genome']
serovar = row['serovar']
            if serovar not in pred_genome_serovar[genome]:
logging.warning('Genome "%s" user specified serovar "%s" not in serovar prediction "%s"',
genome,
serovar,
pred_genome_serovar[genome])
if 'subspecies' in df_serovar:
spp = row['subspecies']
if spp != pred_genome_spp[genome]:
logging.warning('Genome "%s" provided subspecies of "%s" does not match prediction of "%s"',
genome,
spp,
pred_genome_spp[genome])
else:
spp = pred_genome_spp[genome]
logging.warning('Genome "%s" subspecies info not provided. Using subspecies prediction of "%s"',
genome,
spp)
genome_serovar[genome] = serovar
genome_spp[genome] = spp
else:
logging.warning(
'User did not specify serovar/subspecies table! Using SISTR serovar and subspecies predictions for all genomes.')
for genome in genome_names:
genome_serovar[genome] = pred_genome_serovar[genome]
genome_spp[genome] = pred_genome_spp[genome]
genomes_to_serovar_path = os.path.join(outdir, 'genomes-to-serovar.txt')
genomes_to_spp_path = os.path.join(outdir, 'genomes-to-subspecies.txt')
write_genomes_to_x_table(genomes_to_serovar_path, genome_serovar)
assert os.path.exists(genomes_to_serovar_path), '{} file could not be written!'.format(
genomes_to_serovar_path)
logging.info('Wrote genomes to serovars table at %s', genomes_to_serovar_path)
write_genomes_to_x_table(genomes_to_spp_path, genome_spp)
assert os.path.exists(genomes_to_spp_path), '{} file could not be written!'.format(
genomes_to_spp_path)
logging.info('Wrote genomes to subspecies table at %s', genomes_to_spp_path)
def create_merge_mash_sketches(input_fastas, data_outdir, sketch_outdir):
sketch_paths = [sketch_fasta(fasta, sketch_outdir) for fasta in input_fastas]
merge_sketches(data_outdir, sketch_paths)
def write_cgmlst_profiles_hdf5(outdir, cgmlst_results, genome_names):
df_profiles_old = pd.read_hdf(CGMLST_PROFILES_PATH, key='cgmlst')
markers = df_profiles_old.columns
genome_marker_allele_results = defaultdict(dict)
for genome, cgmlst_result in zip(genome_names, cgmlst_results):
for marker in markers:
allele = None
if marker in cgmlst_result:
r = cgmlst_result[marker]
if 'name' in r:
allele = int(r['name']) if r['name'] is not None else None
else:
allele = None
genome_marker_allele_results[genome][marker] = allele
df_profiles_new = pd.DataFrame(genome_marker_allele_results).transpose()
df_all_profiles = pd.concat([df_profiles_new, df_profiles_old])
profiles_output_path = os.path.join(outdir, 'cgmlst-profiles.hdf')
df_all_profiles.to_hdf(profiles_output_path, float_format='%.0f',key='cgmlst')
assert os.path.exists(profiles_output_path), 'cgMLST profiles HDF5 file was not written to "{}"'.format(
profiles_output_path)
logging.info('cgMLST profiles (dim=%s) HDF5 written to "%s"',
df_all_profiles.shape,
profiles_output_path)
def main():
parser = init_parser()
args = parser.parse_args()
init_console_logger(args.verbose)
logging.debug(args)
input_fastas = args.fastas
outdir = args.outdir
tmp_dir = args.tmp_dir
serovar_table_path = args.serovar_table
threads = args.threads
force = args.force
assert len(input_fastas) > 0, 'No FASTA files specified!'
for input_fasta in input_fastas:
assert os.path.exists(input_fasta), 'Genome FASTA file does not exist at "{}"'.format(input_fasta)
genome_names = [genome_name_from_fasta_path(x) for x in input_fastas]
logging.info('You have specified %s genomes to add to current sistr_cmd data files! %s',
len(genome_names),
genome_names)
if os.path.exists(outdir):
if not force:
raise Exception('Output directory already exists at {}!'.format(outdir))
else:
shutil.rmtree(outdir)
logging.warning('Using existing output directory at %s', outdir)
try:
os.makedirs(outdir)
except:
pass
assert os.path.exists(outdir), 'Output directory could not be created!'
if serovar_table_path:
assert os.path.exists(serovar_table_path), 'Provided serovar table path does not exist! {}'.format(
serovar_table_path)
logging.info('Parsing serovar table from "%s"', serovar_table_path)
if re.match(r'.*.csv$', serovar_table_path):
logging.info('Trying to read serovar table "%s" as CSV', serovar_table_path)
df_serovar = pd.read_csv(serovar_table_path)
else:
logging.info('Trying to read serovar table "%s" as tab-delimited', serovar_table_path)
df_serovar = pd.read_table(serovar_table_path)
expected_columns = ['genome', 'serovar','subspecies']
assert np.all(
df_serovar.columns.isin(expected_columns)), 'User serovar table did not contain expected columns {}'.format(
expected_columns)
if 'subspecies' not in df_serovar.columns:
logging.warning(
'User serovar table did not contain "subspecies" column so the sistr_cmd subspecies prediction will be used!')
genome_names_series = pd.Series(genome_names)
genomes_in_serovar_table = genome_names_series.isin(df_serovar.genome)
if not np.all(genomes_in_serovar_table):
missing_genomes = '-->,->'.join([x for x in genome_names_series[~genomes_in_serovar_table]])
logging.error('The following genomes were not found in the serovar table: %s', missing_genomes)
raise Exception('Not all user provided genome FASTA files in the provided serovar table!')
df_wklm = | pd.read_csv(SEROVAR_TABLE_PATH) | pandas.read_csv |
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
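# --- Editor's note: illustrative sketch, not part of the original test file. ---
# Every test below funnels through encode_decode, i.e. a to_msgpack/read_msgpack
# round trip via a temporary file. The same round trip also works in memory,
# since to_msgpack returns bytes when no path is given; the helper below is a
# minimal sketch of that pattern and is never called by the tests.
def _example_msgpack_roundtrip():
    df = DataFrame({'a': [1, 2]})
    packed = df.to_msgpack()      # bytes, because no path was passed
    return read_msgpack(packed)   # an equivalent DataFrame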
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
| assert_frame_equal(result_1, expected_1) | pandas.util.testing.assert_frame_equal |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import json
import requests
import io
url11='https://www.boxofficemojo.com/weekend/by-year/2019/?area=AU'
url12='https://www.boxofficemojo.com/weekend/by-year/2020/?area=AU'
url21='https://www.boxofficemojo.com/weekend/by-year/2019/?area=DE'
url22='https://www.boxofficemojo.com/weekend/by-year/2020/?area=DE'
url31='https://www.boxofficemojo.com/weekend/by-year/2019/?area=JP'
url32='https://www.boxofficemojo.com/weekend/by-year/2020/?area=JP'
url41='https://www.boxofficemojo.com/weekend/by-year/2019/'
url42='https://www.boxofficemojo.com/weekend/by-year/2020/'
#Australia
dates=[]
dfs1=pd.read_html(url11)
dfs2=pd.read_html(url12)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total1=pd.DataFrame()
df110=dfs1[0]['Overall Gross'][29::-1]
df12=dfs1[0]['Dates'][29::-1]
df210=dfs2[0]['Overall Gross'][:0:-1].replace(',','')
df22=dfs2[0]['Dates'][:0:-1]
k = []
for i in df110:
k.append(int((i.replace('$','').replace(',',''))))
df11['Overall Gross']=k
k = []
for i in df210:
k.append(int((i.replace('$','').replace(',',''))))
df21['Overall Gross']=k
for i in range(0,42):
dates.append((datetime.datetime.strptime('2019-06-06','%Y-%m-%d')+datetime.timedelta(days=7*i)).date())
dates.append('2020-03-28')
dates.append('2020-06-04')
total1['Dates']=dates
total1['Overall Gross']=pd.concat([df11,df21],ignore_index=True)
print(total1)
total1.to_csv(r'C:/Users/USER/Desktop/資訊/鄭恆安/csv/Australia.csv',encoding='big5',index=False)
#Germany
dates=[]
dfs1=pd.read_html(url21)
dfs2=pd.read_html(url22)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total2=pd.DataFrame()
df110=dfs1[0]['Overall Gross'][29::-1]
df12=dfs1[0]['Dates'][29::-1]
df210=dfs2[0]['Overall Gross'][:0:-1].replace(',','')
df22=dfs2[0]['Dates'][:0:-1]
k = []
for i in df110:
k.append(int((i.replace('$','').replace(',',''))))
df11['Overall Gross']=k
k = []
for i in df210:
k.append(int((i.replace('$','').replace(',',''))))
df21['Overall Gross']=k
for i in range(0,42):
dates.append((datetime.datetime.strptime('2019-06-06','%Y-%m-%d')+datetime.timedelta(days=7*i)).date())
dates.append('2020-04-09')
dates.append('2020-05-21')
dates.append('2020-05-28')
dates.append('2020-06-04')
total2['Dates']=dates
total2['Overall Gross']=pd.concat([df11,df21],ignore_index=True)
print(total2)
total2.to_csv(r'C:/Users/USER/Desktop/資訊/鄭恆安/csv/Germany.csv',encoding='big5',index=False)
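# --- Editor's note: illustrative sketch, not part of the original script. ---
# The per-element loops above that strip '$' and ',' before casting to int could
# equivalently be vectorized with pandas string methods, assuming the column is a
# Series of strings, e.g.:
#   cleaned = (dfs1[0]['Overall Gross'][29::-1]
#              .str.replace(r'[$,]', '', regex=True)
#              .astype(int))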
#Japan
dates=[]
dfs1=pd.read_html(url31)
dfs2=pd.read_html(url32)
df11=pd.DataFrame()
df12= | pd.DataFrame() | pandas.DataFrame |
"""Dataset module."""
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
import pandas as pd
import torch
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
from torch.utils.data import Dataset
from ..ml.models import ARCHITECTURE_FACTORY, AUTOENCODER_ARCHITECTURES
from ..tokenizer.tokenizer import TOKENIZER_FACTORY, Tokenizer
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
SCALING_FACTORY_FN: Dict[str, Callable] = {
"onehot": lambda: OneHotEncoder(handle_unknown="error", sparse=False),
"min-max": lambda: MinMaxScaler(),
"standard": lambda: StandardScaler(),
}
MODEL_TYPES = set(ARCHITECTURE_FACTORY.keys())
class GranularDataset(Dataset):
"""A dataset wrapper for granular"""
def __init__(self, name: str, data: Dict[str, Any]) -> None:
"""Initialize a granular dataset.
Args:
name: dataset name.
data: dataset samples.
"""
self.dataset: Dict[str, Any] = {"name": name, "data": data}
def __len__(self) -> int:
"""Dataset length.
Returns:
length of the dataset.
"""
lengths = {key: len(data) for key, data in self.dataset["data"].items()}
if len(set(lengths.values())) > 1:
raise ValueError(f"mismatching dimensions for the data: {lengths}")
return list(lengths.values())[0]
def __getitem__(self, index: int) -> Dict[str, Any]:
"""Retrieve an item from the dataset by index.
Args:
index: index for the item.
Returns:
an item.
"""
result = dict()
for key in self.dataset["data"]:
result[self.dataset["name"] + "_" + key] = self.dataset["data"][key][index]
return result
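# --- Editor's note: illustrative sketch, not part of the original module. ---
# A GranularDataset prefixes each data key with the dataset name, so indexing
# returns a flat dict per sample. The toy tensors below are made up for
# illustration and the helper is never called by this module.
def _example_granular_dataset():
    data = {"input": torch.zeros(4, 3), "target": torch.ones(4, 1)}
    ds = GranularDataset(name="toy", data=data)
    item = ds[0]  # {'toy_input': tensor of shape (3,), 'toy_target': tensor of shape (1,)}
    return len(ds), item  # (4, {...})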
class CombinedGranularDataset(Dataset):
"""General dataset combining multiple granular datasets."""
def __init__(self, datasets: List[Dict[str, Any]]) -> None:
"""Initialize a general dataset.
Args:
datasets: list of dataset configurations.
"""
self.datasets = datasets
self.names = [data["name"] for data in datasets]
def __len__(self) -> int:
"""Dataset length.
Returns:
length of the dataset.
"""
return len([*self.datasets[0]["data"].values()][0])
def __getitem__(self, index: int) -> Dict[str, Any]:
"""Retrieve an item from the dataset by index.
Args:
index: index for the item.
Returns:
an item.
"""
result = dict()
for dataset in self.datasets:
keys = [*dataset["data"]]
for key in keys:
result[dataset["name"] + "_" + key] = dataset["data"][key][index]
return result
class SmilesTokenizationPreProcessingDataset(GranularDataset):
"""Dataset for SMILES/SELFIES preprocessing."""
def __init__(
self,
name: str,
data_columns: Dict[str, Any],
input_smiles: pd.DataFrame,
target_smiles: pd.DataFrame,
tokenizer: Tokenizer,
set_seq_size: Optional[int] = None,
) -> None:
"""Construct a SmilesTokenizationPreProcessingDataset.
Args:
name: dataset name.
data_columns: data columns mapping.
input_smiles: dataframe containing input SMILES.
target_smiles: dataframe containing target SMILES.
tokenizer: a tokenizer defining the molecule representation used.
set_seq_size: sequence size. Defaults to None, a.k.a., define this
using the input SMILES.
"""
self.name = name
self.input_smiles = input_smiles.values.flatten().tolist()
self.target_smiles = target_smiles.values.flatten().tolist()
self.tokenizer = tokenizer
self.input_tokens: List[torch.Tensor] = []
self.target_tokens: List[torch.Tensor] = []
tokens_ids = [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(smile))
for smile in self.input_smiles
]
if set_seq_size:
self.set_seq_size = set_seq_size
else:
self.set_seq_size = max([len(i) for i in tokens_ids]) + 20
self.smiles_to_ids(input_smiles=self.input_smiles)
self.smiles_to_ids(target_smiles=self.target_smiles)
super().__init__(
name=name,
data={
data_columns["input"]: self.input_tokens,
data_columns["target"]: self.target_tokens,
},
)
def smiles_to_ids(
self, input_smiles: List[str] = [], target_smiles: List[str] = []
) -> None:
"""Process input SMILES lists generating examples by tokenizing strings and converting them to tensors.
Args:
input_smiles: list of input SMILES representations. Defaults to [].
target_smiles: list of target SMILES representations. Defaults to [].
"""
if len(input_smiles) > 0 and len(target_smiles) == 0:
self.input_smiles = input_smiles
smiles = input_smiles
elif len(input_smiles) == 0 and len(target_smiles) > 0:
self.target_smiles = target_smiles
smiles = target_smiles
else:
raise Exception(
"Either input_smiles or target_smiles needs to be specified"
)
tokens_ids = [
self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(smile))
for smile in smiles
]
examples = []
for token in tokens_ids:
example_tokens = self.tokenizer.convert_tokens_to_ids(
[self.tokenizer.sos_token]
)
example_tokens.extend(token)
example_tokens.extend(
self.tokenizer.convert_tokens_to_ids([self.tokenizer.eos_token])
)
examples.append(
torch.tensor(
self.tokenizer.add_padding_tokens(example_tokens, self.set_seq_size)
)
)
if len(input_smiles) > 0 and len(target_smiles) == 0:
self.input_tokens = examples
elif len(input_smiles) == 0 and len(target_smiles) > 0:
self.target_tokens = examples
class LatentModelDataset(GranularDataset):
"""Latent model dataset."""
def __init__(
self,
name: str,
data_columns: Dict[str, Any],
target_data: pd.DataFrame,
scaling: Optional[str] = None,
) -> None:
"""Construct a LatentModelDataset.
Args:
name: dataset name.
data_columns: data columns mapping.
target_data: dataframe for targets.
scaling: feature scaling process. Defaults to None, a.k.a. no scaling. Currently not supported.
Raises:
NotImplementedError: in case a scaling is selected.
"""
self.name = name
if scaling:
raise NotImplementedError("Scaling not yet supported")
self.target_data = torch.from_numpy(target_data.values)
self.target_data = self.target_data.type(torch.float)
self.target_size = target_data.shape[1]
super().__init__(name=name, data={data_columns["target"]: self.target_data})
class AutoEncoderDataset(GranularDataset):
"""Autoencoder dataset."""
def __init__(
self,
name: str,
data_columns: Dict[str, Any],
input_data: pd.DataFrame,
target_data: pd.DataFrame,
scaling: Optional[str] = None,
) -> None:
"""Construct an AutoEncoderDataset.
Args:
name: dataset name.
data_columns: data columns mapping.
input_data: dataframe for inputs.
target_data: dataframe for targets.
scaling: feature scaling process. Defaults to None, a.k.a. no scaling. Feasible values: "onehot", "min-max" and "standard".
Raises:
ValueError: in case requested scaling is not supported.
"""
self.name = name
self.data_columns = data_columns
if scaling is None:
self.input_data = torch.from_numpy(input_data.values)
self.target_data = torch.from_numpy(target_data.values)
else:
if scaling not in SCALING_FACTORY_FN:
raise ValueError(
f"Scaling={scaling} not supported. Pick a valid one: {sorted(list(SCALING_FACTORY_FN.keys()))}"
)
self.input_scaling = ColumnTransformer(
transformers=[
(
"InputScaling",
SCALING_FACTORY_FN[scaling](),
[data_columns["input"]],
)
]
)
self.target_scaling = ColumnTransformer(
transformers=[
(
"TargetScaling",
SCALING_FACTORY_FN[scaling](),
[data_columns["target"]],
)
]
)
self.input_data = torch.from_numpy(
self.input_scaling.fit_transform(pd.concat([input_data], axis=1))
)
self.target_data = torch.from_numpy(
self.target_scaling.fit_transform(pd.concat([target_data], axis=1))
)
self.input_data, self.target_data = (
self.input_data.type(torch.float),
self.target_data.type(torch.float),
)
self.input_size = self.input_data.shape[1]
self.target_size = self.target_data.shape[1]
super().__init__(
name=name,
data={
data_columns["input"]: self.input_data,
data_columns["target"]: self.target_data,
},
)
DATASET_FACTORY: Dict[str, Type[GranularDataset]] = {
"latentmodel": LatentModelDataset,
"smiles": SmilesTokenizationPreProcessingDataset,
"selfies": SmilesTokenizationPreProcessingDataset,
"autoencoder": AutoEncoderDataset,
}
def build_data_columns(hparams: Dict[str, Any]) -> Dict[str, Any]:
"""Build data columns from hyper-parameters.
Args:
hparams: hyper-parameters for the data columns.
Returns:
data columns.
"""
try:
input_columns = hparams["input"]
except KeyError:
input_columns = None
try:
target_columns = hparams["target"]
except KeyError:
target_columns = None
# create dictionary
if input_columns:
data_columns = {"input": input_columns, "target": target_columns}
else:
data_columns = {"target": target_columns}
return data_columns
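# --- Editor's note: illustrative sketch, not part of the original module. ---
# build_data_columns only inspects the optional 'input' and 'target' keys, e.g.
#   build_data_columns({'input': 'smiles', 'target': ['qed']})
#       -> {'input': 'smiles', 'target': ['qed']}
#   build_data_columns({'target': 'all'})
#       -> {'target': 'all'}   # no 'input' key in the result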
def build_dataset(
name: str,
data: pd.DataFrame,
dataset_type: str,
data_columns: Dict[str, Any],
hparams: Dict[str, Any],
) -> GranularDataset:
"""Build a granular dataset.
Args:
name: dataset name.
data: dataframe representing the dataset.
dataset_type: dataset type. Feasible values: "latentmodel", "smiles", "selfies" and "autoencoder".
data_columns: data columns mapping.
hparams: hyper-parameters for the data columns.
Raises:
ValueError: in case requested dataset type is not supported.
Returns:
a granular dataset.
"""
dataset: GranularDataset
dataset_type = dataset_type.lower()
if dataset_type not in DATASET_FACTORY:
raise ValueError(
f"dataset_type={dataset_type} not supported. Pick a valid one: {sorted(list(DATASET_FACTORY.keys()))}"
)
input_columns: List[Any]
if not dataset_type == "latentmodel":
if data_columns["input"] == "all":
input_columns = data.columns.tolist()
else:
if isinstance(data_columns["input"], list):
input_columns = data_columns["input"]
else:
input_columns = [data_columns["input"]]
target_columns: List[Any]
if data_columns["target"] == "all":
target_columns = data.columns.tolist()
else:
if isinstance(data_columns["target"], list):
target_columns = data_columns["target"]
else:
target_columns = [data_columns["target"]]
if dataset_type in {"smiles", "selfies"}:
try:
build_vocab = hparams["build_vocab"]
except KeyError:
build_vocab = None
try:
sequence_size = hparams["sequence_size"]
except KeyError:
sequence_size = None
vocab_file = hparams["vocab_file"]
# build tokenizer
if build_vocab:
tokenizer = TOKENIZER_FACTORY[dataset_type](
vocab_file, smiles=data[input_columns].squeeze().tolist()
)
else:
tokenizer = TOKENIZER_FACTORY[dataset_type](vocab_file, smiles=[])
dataset = SmilesTokenizationPreProcessingDataset(
name=name,
data_columns=data_columns,
input_smiles=data[input_columns],
target_smiles=data[target_columns],
tokenizer=tokenizer,
set_seq_size=sequence_size,
)
elif dataset_type == "latentmodel":
dataset = LatentModelDataset(
name=name,
data_columns=data_columns,
target_data=data[target_columns],
scaling=None,
)
elif dataset_type == "autoencoder":
dataset = AutoEncoderDataset(
name=name,
data_columns=data_columns,
input_data=data[input_columns],
target_data=data[target_columns],
scaling=hparams["scaling"],
)
return dataset
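# --- Editor's note: illustrative sketch, not part of the original module. ---
# Minimal sketch of building an autoencoder dataset from an in-memory frame; the
# column names and hyper-parameters are made up, and the helper is never called
# by this module.
def _example_build_autoencoder_dataset():
    frame = pd.DataFrame({"x1": [0.0, 1.0, 2.0], "y": [1.0, 2.0, 3.0]})
    return build_dataset(
        name="toy",
        data=frame,
        dataset_type="autoencoder",
        data_columns={"input": ["x1"], "target": ["y"]},
        hparams={"scaling": None},
    )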
def build_architecture(
model_type: str,
data_columns: Dict[str, Any],
dataset: GranularDataset,
hparams: Dict[str, Any],
) -> Dict[str, Any]:
"""Build architecture configuration for the selected model type and dataset.
Args:
model_type: model type. Feasible values: "vae_rnn", "vae_trans", "mlp_predictor", "no_encoding", "mlp_autoencoder" and "vae_mlp".
data_columns: data columns mapping.
dataset: a granular dataset.
hparams: hyper-parameters for the data columns.
Raises:
ValueError: in case requested model type is not supported.
Returns:
architecture configuration.
"""
model_type = model_type.lower()
if model_type not in MODEL_TYPES:
raise ValueError(
f"model_type={model_type} not supported. Pick a valid one: {sorted(list(MODEL_TYPES))}"
)
architecture: Dict[str, Any] = {
"name": hparams["name"],
"type": hparams["type"],
"start_from_checkpoint": hparams["start_from_checkpoint"],
"freeze_weights": hparams["freeze_weights"],
"data": data_columns,
"hparams": hparams,
}
if model_type in AUTOENCODER_ARCHITECTURES:
architecture["position"] = hparams["position"]
if model_type in {"vae_rnn", "vae_trans"}:
hparams["tokenizer"] = dataset.tokenizer
hparams["vocab_size"] = dataset.tokenizer.vocab_size
if model_type == "vae_rnn":
hparams["embedding_size"] = dataset.set_seq_size
else: # "vae_trans"
hparams["sequence_len"] = dataset.set_seq_size
elif model_type == "no_encoding":
hparams["latent_size"] = dataset.input_size
elif model_type in {"mlp_autoencoder", "vae_mlp"}:
hparams["input_size_enc"] = dataset.input_size
hparams["output_size_dec"] = dataset.target_size
else: # "mlp_predictor"
hparams["output_size"] = dataset.target_size
architecture["from_position"] = hparams["from_position"]
return architecture
def build_dataset_and_architecture(
name: str,
data_path: str,
data_file: str,
dataset_type: str,
model_type: str,
hparams: Dict[str, Any],
**kwargs,
) -> Tuple[GranularDataset, Dict[str, Any]]:
"""Build a dataset and an architecture configuration.
Args:
name: dataset name.
data_path: path to the dataset.
data_file: data file name.
dataset_type: dataset type. Feasible values: "latentmodel", "smiles", "selfies" and "autoencoder".
model_type: model type. Feasible values: "vae_rnn", "vae_trans", "mlp_predictor", "no_encoding", "mlp_autoencoder" and "vae_mlp".
hparams: hyper-parameters for the data columns.
Raises:
ValueError: in case the data file has an unsupported extension/format.
Returns:
a tuple containing a granular dataset and a related architecture configuration.
"""
if data_file.endswith(".csv"):
data = pd.read_csv(f"{data_path}{os.path.sep}{data_file}")
elif data_file.endswith(".bz2") or data_file.endswith(".pkl"):
data = | pd.read_pickle(f"{data_path}{os.path.sep}{data_file}") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 16:39:25 2019
@author: Shane
"""
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import scipy
import scipy.stats as stats
import glob
import statsmodels.stats.api as sms
#import matplotlib for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import ScalarFormatter, LogFormatter
import seaborn as sns
import math
from scipy.spatial import distance
#import os to handle operating system
import os
#=============================================================================
#import files to analyze
datadir = "D:\\Goode_Lab\\Projects\\actin_cables\\data\\summary_data\\"
#initalize data frame to append all data
df = pd.DataFrame()
#import data to dataframe
df = pd.read_csv(datadir + '201210_cdc28-13ts_t-8_t1_yBG12_yBG9_all_cable_analysis.csv')
#=============================================================================
#parse the data into the necessary strain types for plotting
#setup df with only yBG12 cells
df_hap = pd.DataFrame()
df_hap = df.loc[(df['strain']=='yBG12')].reset_index()
#setup df with only yBG9 cells
df_dip = pd.DataFrame()
df_dip = df.loc[(df['strain']=='yBG9')].reset_index()
#setup df with only uninduced cdc28 cells
df_cdcu = pd.DataFrame()
df_cdcu = df.loc[(df['strain']=='cdc28-13ts, t0')].reset_index()
#setup df with only induced cdc28 cells
df_cdci = | pd.DataFrame() | pandas.DataFrame |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.array([1, 2], dtype="Int64"))
b = Series(to_decimal([1, 2]))
result = concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = concat({"First": Series(range(3)), "Another": Series(range(4))})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pdt", [Series, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = Series(SparseArray([0, 1, 2]))
expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
# GH 30668
a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
b = Series([1], dtype=float)
expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(
pd.SparseDtype(np.float64, None)
)
result = concat([a, b], axis=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
def test_duplicate_keys(keys):
# GH 33654
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
s1 = Series([7, 8, 9], name="c")
s2 = Series([10, 11, 12], name="d")
result = concat([df, s1, s2], axis=1, keys=keys)
expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
expected_columns = MultiIndex.from_tuples(
[(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
)
expected = DataFrame(expected_values, columns=expected_columns)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"obj",
[
tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
tm.SubclassedSeries(np.arange(0, 10), name="A"),
],
)
def test_concat_preserves_subclass(obj):
# GH28330 -- preserve subclass
result = concat([obj, obj])
assert isinstance(result, type(obj))
def test_concat_frame_axis0_extension_dtypes():
# preserve extension dtype (through common_dtype mechanism)
df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
df2 = DataFrame({"a": np.array([4, 5, 6])})
result = concat([df1, df2], ignore_index=True)
expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
result = concat([df2, df1], ignore_index=True)
expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
def test_concat_preserves_extension_int64_dtype():
# GH 24768
df_a = DataFrame({"a": [-1]}, dtype="Int64")
df_b = DataFrame({"b": [1]}, dtype="Int64")
result = concat([df_a, df_b], ignore_index=True)
expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype1,dtype2,expected_dtype",
[
("bool", "bool", "bool"),
("boolean", "bool", "boolean"),
("bool", "boolean", "boolean"),
("boolean", "boolean", "boolean"),
],
)
def test_concat_bool_types(dtype1, dtype2, expected_dtype):
# GH 42800
ser1 = Series([True, False], dtype=dtype1)
ser2 = Series([False, True], dtype=dtype2)
result = concat([ser1, ser2], ignore_index=True)
expected = Series([True, False, False, True], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("keys", "integrity"),
[
(["red"] * 3, True),
(["red"] * 3, False),
(["red", "blue", "red"], False),
(["red", "blue", "red"], True),
],
)
def test_concat_repeated_keys(keys, integrity):
# GH: 20816
series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})]
result = concat(series_list, keys=keys, verify_integrity=integrity)
tuples = list(zip(keys, ["a", "b", "c"]))
expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
tm.assert_series_equal(result, expected)
def test_concat_null_object_with_dti():
# GH#40841
dti = pd.DatetimeIndex(
["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
)
right = DataFrame(data={"C": [0.5274]}, index=dti)
idx = Index([None], dtype="object", name="Maybe Time (UTC)")
left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
result = concat([left, right], axis="columns")
exp_index = Index([None, dti[0]], dtype=object)
expected = DataFrame(
{"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]},
index=exp_index,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_empty_rangeindex():
# GH#41234
mi = MultiIndex.from_tuples([("B", 1), ("C", 1)])
df1 = DataFrame([[1, 2]], columns=mi)
df2 = DataFrame(index=[1], columns=pd.RangeIndex(0))
result = concat([df1, df2])
expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi)
tm.assert_frame_equal(result, expected)
def test_concat_posargs_deprecation():
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame([[1, 2, 3]], index=["a"])
df2 = DataFrame([[4, 5, 6]], index=["b"])
msg = (
"In a future version of pandas all arguments of concat "
"except for the argument 'objs' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = concat([df, df2], 0)
import math
import numpy as np
import pandas as pd
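# weibull() evaluates the Weibull density h(v) = k/A * (v/A)**(k-1) * exp(-(v/A)**k).
# Either the scale A or the mean wind speed v_m must be supplied; for k=2 they are related
# by A = 2/sqrt(pi) * v_m. Illustrative call (values are examples only):
# h, A, v_m = weibull(np.arange(0, 26), k=2, v_m=6.0)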
def weibull(v, A=None, k=2, v_m=None):
print("A: ",A)
if A is None:
A = 2/math.sqrt(math.pi) * v_m
if v_m is None:
v_m = round(A * math.sqrt(math.pi) / 2, 2)
h = k/A * (v/A)**(k-1) * np.exp(-(v/A)**k)
return h, A, v_m
def weibull_windhistogramm(start=0, stop=30, step=1, A=None, k=2, v_m=None):
v = np.arange(start, stop + step, step)
df = pd.DataFrame()
df['v'] = v
v_detailed = np.arange(start, stop + 0.01, 0.01)
df_detailed = pd.DataFrame()
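# Possible continuation (illustrative): evaluate the density on both grids with weibull(), e.g.
# df['h'], A, v_m = weibull(df['v'], A=A, k=k, v_m=v_m)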
import pandas as pd
import logging
import numpy as np
import collections
import configparser
import shutil
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import requests
import io
from astropy.io import fits
from astropy.time import Time
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import sphere
import sphere.utils as utils
import sphere.toolbox as toolbox
_log = logging.getLogger(__name__)
# WFS wavelength
wave_wfs = 500e-9
class Reduction(object):
'''
SPHERE/SPARTA dataset reduction class
The analysis and plotting code of this class was originally
developed by <NAME> (ESO/IPAG) and based on SAXO tools
from Jean-<NAME> (ONERA). See:
https://github.com/jmilou/sparta
for the code from <NAME>.
'''
##################################################
# Class variables
##################################################
# specify for each recipe which other recipes need to have been executed before
recipe_requirements = collections.OrderedDict([
('sort_files', []),
('sph_sparta_dtts', ['sort_files']),
('sph_sparta_wfs_parameters', ['sort_files']),
('sph_sparta_atmospheric_parameters', ['sort_files']),
('sph_query_databases', ['sort_files']),
('sph_sparta_plot', ['sort_files', 'sph_sparta_dtts', 'sph_sparta_wfs_parameters', 'sph_sparta_atmospheric_parameters']),
('sph_sparta_clean', [])
])
##################################################
# Constructor
##################################################
def __new__(cls, path, log_level='info', sphere_handler=None):
'''
Custom instantiation for the class
The customized instantiation enables to check that the
provided path is a valid reduction path. If not, None will be
returned for the reduction being created. Otherwise, an
instance is created and returned at the end.
Parameters
----------
path : str
Path to the directory containing the dataset
level : {'debug', 'info', 'warning', 'error', 'critical'}
The log level of the handler
sphere_handler : log handler
Higher-level SPHERE.Dataset log handler
'''
#
# make sure we are dealing with a proper reduction directory
#
# init path
path = Path(path).expanduser().resolve()
# zeroth-order reduction validation
raw = path / 'raw'
if not raw.exists():
_log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
return None
else:
reduction = super(Reduction, cls).__new__(cls)
#
# basic init
#
# init path
reduction._path = utils.ReductionPath(path)
# instrument and mode
reduction._instrument = 'SPARTA'
#
# logging
#
logger = logging.getLogger(str(path))
logger.setLevel(log_level.upper())
if logger.hasHandlers():
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
if sphere_handler:
logger.addHandler(sphere_handler)
reduction._logger = logger
reduction._logger.info('Creating SPARTA reduction at path {}'.format(path))
#
# configuration
#
reduction._logger.debug('> read default configuration')
configfile = f'{Path(sphere.__file__).parent}/instruments/{reduction._instrument}.ini'
config = configparser.ConfigParser()
reduction._logger.debug('Read configuration')
config.read(configfile)
# reduction parameters
reduction._config = dict(config.items('reduction'))
for key, value in reduction._config.items():
try:
val = eval(value)
except NameError:
val = value
reduction._config[key] = val
#
# reduction and recipe status
#
reduction._status = sphere.INIT
reduction._recipes_status = collections.OrderedDict()
for recipe in reduction.recipe_requirements.keys():
reduction._update_recipe_status(recipe, sphere.NOTSET)
# reload any existing data frames
reduction._read_info()
reduction._logger.warning('#########################################################')
reduction._logger.warning('# WARNING! #')
reduction._logger.warning('# Support for SPARTA files is preliminary. The current #')
reduction._logger.warning('# format of product files may change in future versions #')
reduction._logger.warning('# of the pipeline until an appropriate format is found. #')
reduction._logger.warning('# Please do not blindly rely on the current format. #')
reduction._logger.warning('#########################################################')
#
# return instance
#
return reduction
##################################################
# Representation
##################################################
def __repr__(self):
return '<Reduction, instrument={}, path={}, log={}>'.format(self._instrument, self._path, self.loglevel)
def __format__(self, format_spec):
return self.__repr__()
##################################################
# Properties
##################################################
@property
def loglevel(self):
return logging.getLevelName(self._logger.level)
@loglevel.setter
def loglevel(self, level):
self._logger.setLevel(level.upper())
@property
def instrument(self):
return self._instrument
@property
def path(self):
return self._path
@property
def files_info(self):
return self._files_info
@property
def dtts_info(self):
return self._dtts_info
@property
def visloop_info(self):
return self._visloop_info
@property
def irloop_info(self):
return self._irloop_info
@property
def atmospheric_info(self):
return self._atmos_info
@property
def recipe_status(self):
return self._recipes_status
@property
def config(self):
return self._config
@property
def status(self):
return self._status
##################################################
# Private methods
##################################################
def _read_info(self):
'''
Read the files, calibs and frames information from disk
files_info : dataframe
The data frame with all the information on files
This function is not supposed to be called directly by the user.
'''
self._logger.info('Read existing reduction information')
# path
path = self.path
# files info
fname = path.preproc / 'files.csv'
if fname.exists():
self._logger.debug('> read files.csv')
files_info = pd.read_csv(fname, index_col=0)
# convert times
files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False)
files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False)
# update recipe execution
self._update_recipe_status('sort_files', sphere.SUCCESS)
else:
files_info = None
# DTTS info
fname = path.products / 'dtts_frames.csv'
if fname.exists():
self._logger.debug('> read dtts_frames.csv')
dtts_info = pd.read_csv(fname, index_col=0)
# convert times
dtts_info['DATE-OBS'] = pd.to_datetime(dtts_info['DATE-OBS'], utc=False)
dtts_info['DATE'] = pd.to_datetime(dtts_info['DATE'], utc=False)
dtts_info['TIME'] = pd.to_datetime(dtts_info['TIME'], utc=False)
# update recipe execution
self._update_recipe_status('sph_sparta_dtts', sphere.SUCCESS)
else:
dtts_info = None
# VisLoop info
fname = path.products / 'visloop_info.csv'
visloop = False
if fname.exists():
self._logger.debug('> read visloop_info.csv')
visloop_info = pd.read_csv(fname, index_col=0)
# convert times
visloop_info['DATE-OBS'] = pd.to_datetime(visloop_info['DATE-OBS'], utc=False)
visloop_info['DATE'] = pd.to_datetime(visloop_info['DATE'], utc=False)
visloop_info['TIME'] = pd.to_datetime(visloop_info['TIME'], utc=False)
visloop = True
else:
visloop_info = None
# IRLoop info
fname = path.products / 'irloop_info.csv'
irloop = False
if fname.exists():
self._logger.debug('> read irloop_info.csv')
irloop_info = pd.read_csv(fname, index_col=0)
# convert times
irloop_info['DATE-OBS'] = pd.to_datetime(irloop_info['DATE-OBS'], utc=False)
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from tqdm import tqdm, trange
# https://mkjjo.github.io/python/2019/01/10/scaler.html
class MinMaxScaler():
def __init__(self, min_val, max_val):
assert (max_val > min_val)
self.min_val = min_val
self.max_val = max_val
def scale_value(self, val):
return (val - self.min_val) / (self.max_val - self.min_val)
def inv_scale_value(self, scaled_val):
return self.min_val + scaled_val * (self.max_val - self.min_val)
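# Illustrative use of the scaler above: MinMaxScaler(0, 200).scale_value(50) returns 0.25,
# and inv_scale_value(0.25) maps it back to 50.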
def read_marcap(start, end, codes, marcap_data):
dfs = []
for year in range(start.year, end.year + 1):
csv_file = os.path.join(marcap_data, f'marcap-{year}.csv.gz')
df = pd.read_csv(csv_file, dtype={'Code': str})
dfs.append(df)
# Combine the per-year data
df_all = pd.concat(dfs)
# Convert the date strings to datetime
df_all['Date'] = pd.to_datetime(df_all['Date'])
# Keep only the requested codes
df_all = df_all[df_all['Code'].isin(codes)]
# Apply the date range
df_all = df_all[(start <= df_all["Date"]) & (df_all["Date"] <= end)]
# Sort by date (ascending)
df_all = df_all.sort_values('Date', ascending=True)
return df_all
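# Illustrative call (path and ticker code are examples only): load the 2018-2019 files for one
# code and keep only rows inside the requested window:
# df = read_marcap(pd.Timestamp('2018-01-01'), pd.Timestamp('2019-12-31'), ['005930'], 'marcap/data')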
def load_datas(df, x_cols, y_col, train_start, train_end, test_start, test_end, n_seq):
train_inputs, train_labels = [], []
test_inputs, test_labels = [], []
for i in trange(0, len(df) - n_seq):
x = df.iloc[i:i + n_seq][x_cols].to_numpy()
y = df.iloc[i + n_seq][y_col]
date = df.iloc[i + n_seq]['Date']
if train_start <= date <= train_end:
train_inputs.append(x)
train_labels.append(y)
elif test_start <= date <= test_end:
test_inputs.append(x)
test_labels.append(y)
else:
print(f'discard {date}')
train_inputs = np.array(train_inputs)
train_labels = np.array(train_labels)
test_inputs = np.array(test_inputs)
test_labels = np.array(test_labels)
return train_inputs, train_labels, test_inputs, test_labels
def load_datas_scaled(df, x_cols, y_col, train_start, train_end, test_start, test_end, n_seq):
scaler_dic = {}
for col in x_cols:
min_val = df[col].min()
max_val = df[col].max()
scaler_dic[col] = MinMaxScaler(min_val, max_val)
train_inputs, train_labels = [], []
test_inputs, test_labels = [], []
for i in trange(0, len(df) - n_seq):
x = []
for j in range(n_seq):
xj = df.iloc[i + j]
xh = []
for col in x_cols:
x_scaler = scaler_dic[col]
xh.append(x_scaler.scale_value(xj[col]))
x.append(xh)
y_scaler = scaler_dic[y_col]
y = y_scaler.scale_value(df.iloc[i + n_seq][y_col])
date = df.iloc[i + n_seq]['Date']
if train_start <= date <= train_end:
train_inputs.append(x)
train_labels.append(y)
elif test_start <= date <= test_end:
test_inputs.append(x)
test_labels.append(y)
else:
print(f'discard {date}')
train_inputs = np.array(train_inputs)
train_labels = np.array(train_labels)
test_inputs = np.array(test_inputs)
test_labels = np.array(test_labels)
return train_inputs, train_labels, test_inputs, test_labels, scaler_dic
def _load_datas_by_code_x_multi(df, code, x_cols, y_col, n_seq, scaler_dic):
df_code = df[df['Code'] == code]
data_dic = {}
for i in trange(0, len(df_code) - n_seq):
x = []
for j in range(n_seq):
xj = df_code.iloc[i + j]
xh = []
for col in x_cols:
x_scaler = scaler_dic[col]
xh.append(x_scaler.scale_value(xj[col]))
x.append(xh)
y_scaler = scaler_dic[y_col]
y = y_scaler.scale_value(df_code.iloc[i + n_seq][y_col])
date = df_code.iloc[i + n_seq]['Date']
data_dic[date] = (x, y)
return data_dic
def load_datas_scaled_x_multi(df, code_to_id, y_code, x_cols, y_col, train_start, train_end, test_start, test_end, n_seq):
scaler_dic = {}
for col in x_cols:
min_val = df[col].min()
max_val = df[col].max()
scaler_dic[col] = MinMaxScaler(min_val, max_val)
train_inputs, train_codes, train_labels = [], [], []
test_inputs, test_codes, test_labels = [], [], []
data_code_dic = {}
for code in code_to_id.keys():
data_dic = _load_datas_by_code_x_multi(df, code, x_cols, y_col, n_seq, scaler_dic)
data_code_dic[code] = data_dic
date_list = df['Date'].unique()
for i, date in enumerate(tqdm(date_list)):
date = pd.to_datetime(date)
for code in code_to_id.keys():
data_dic = data_code_dic[code]
if date in data_dic:
x, y = data_dic[date]
if train_start <= date <= train_end:
train_inputs.append(x)
train_codes.append([code_to_id[code]] * n_seq)
train_labels.append(y)
elif test_start <= date <= test_end and code == y_code:
test_inputs.append(x)
test_codes.append([code_to_id[code]] * n_seq)
test_labels.append(y)
else:
print(f'discard {date} / {code}')
else:
print(f'not exists {date} / {code}')
train_inputs = np.array(train_inputs)
train_codes = np.array(train_codes)
train_labels = np.array(train_labels)
test_inputs = np.array(test_inputs)
test_codes = np.array(test_codes)
test_labels = np.array(test_labels)
return train_inputs, train_codes, train_labels, test_inputs, test_codes, test_labels, scaler_dic
def _load_datas_by_code_x_y_multi(df, code, x_cols, y_cols, n_seq, scaler_dic):
df_code = df[df['Code'] == code]
data_dic = {}
for i in trange(0, len(df_code) - n_seq):
x = []
for j in range(n_seq):
xj = df_code.iloc[i + j]
xh = []
for col in x_cols:
x_scaler = scaler_dic[col]
xh.append(x_scaler.scale_value(xj[col]))
x.append(xh)
y = []
yj = df_code.iloc[i + n_seq]
for col in y_cols:
y_scaler = scaler_dic[col]
y.append(y_scaler.scale_value(yj[col]))
date = df_code.iloc[i + n_seq]['Date']
data_dic[date] = (x, y)
return data_dic
def load_datas_scaled_x_y_multi(df, code_to_id, y_code, x_cols, y_cols, train_start, train_end, test_start, test_end, n_seq):
scaler_dic = {}
for col in x_cols:
min_val = df[col].min()
max_val = df[col].max()
scaler_dic[col] = MinMaxScaler(min_val, max_val)
train_inputs, train_codes, train_labels = [], [], []
test_inputs, test_codes, test_labels = [], [], []
data_code_dic = {}
for code in code_to_id.keys():
data_dic = _load_datas_by_code_x_y_multi(df, code, x_cols, y_cols, n_seq, scaler_dic)
data_code_dic[code] = data_dic
date_list = df['Date'].unique()
for i, date in enumerate(tqdm(date_list)):
date = pd.to_datetime(date)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
# coding: utf-8
import numpy as np
import pandas as pd
from metpy.units import units
from metpy.calc import dewpoint_from_relative_humidity, equivalent_potential_temperature
##################################################
# Main
##################################################
if __name__ == '__main__':
temp = [0, 5, 10, 15, 20, 25]
rh = [100, 100, 100, 100, 100, 100]
pres = [850, 850, 850, 850, 850, 850]
df = pd.DataFrame({'Temp.': temp, 'Humidity': rh, 'Pressure': pres})
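# Possible continuation based on the imports above (illustrative only):
# td = dewpoint_from_relative_humidity(df['Temp.'].values * units.degC, df['Humidity'].values * units.percent)
# theta_e = equivalent_potential_temperature(df['Pressure'].values * units.hPa, df['Temp.'].values * units.degC, td)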
import glob
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy.ndimage as ndi
import skimage.feature
import skimage.io
import skimage.measure
def read_files(files):
'''
Reads groups of image files and loads them as numpy arrays.
'''
files = sorted(files)
file_order = [3, 0, 1, 2]
min_slice = 1 # First slice ignored
files = [files[i] for i in file_order]
files = files[:3]
image = list(map(skimage.io.imread, files))
image = np.stack(image)
image = np.max(image[:, min_slice:], axis=1)
assert image.ndim == 3
return image
def display_files(files):
''' Displays files for easier viewing. '''
image = read_files(files)
fig, ax = plt.subplots(1, image.shape[0], figsize=(16, 10))
for i in range(image.shape[0]):
ax[i].imshow(image[i])
ax[i].set_axis_off()
plt.show()
def nuclear_detection(image):
''' Detects and segments nuclear instances. '''
img = ndi.gaussian_filter(image, 6)
img = img > skimage.filters.threshold_otsu(img)
otsu = img
img = ndi.binary_erosion(img)
img = ndi.distance_transform_edt(img)
img = img > 5
lab = ndi.label(img)[0]
img = skimage.morphology.watershed(image, lab, mask=otsu)
return img
def cytoplasmic_segmentation(image, nucleus):
''' Basic foreground / background segmentation. '''
img = ndi.gaussian_filter(image, 5)
img = img > img.mean() * 0.75
img = skimage.morphology.watershed(img, nucleus, mask=img)
img -= nucleus
return img
def blob_detection(image):
'''
Detects spots in an image returning the coordinates and size.
Returns in the format "row (y), column (x), sigma"
'''
blobs = skimage.feature.blob_log(image, max_sigma=2, threshold=0.05)
return blobs
def blob_visualization(image, blobs, size=False):
''' Shows blob detected spots on an image. '''
fig, ax = plt.subplots(1, figsize=(10, 10))
ax.imshow(image, cmap='gray')
# Matplotlib functions plot in xy direction, not rc
if size:
blobs[:, 2] = blobs[:, 2] * math.sqrt(2)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color='red', linewidth=1, fill=False)
ax.add_patch(c)
else:
ax.scatter(blobs[:, 1], blobs[:, 0], s=1, marker='x', c='red')
ax.set_axis_off()
plt.tight_layout()
plt.show()
def get_value(x, y, image, threshold=None):
'''
Returns the label value at a blob position.
Optional thresholding allows for boolean predictions.
'''
if threshold is not None:
image = image > threshold
label = image[int(y), int(x)]
return label
def get_count(x, y, region, subregion=None):
'''
Returns the number of blobs in the specified region / subregion.
'''
if subregion is not None:
region = region - subregion > 0
else:
region = region > 0
x_int = x.astype(int)
y_int = y.astype(int)
xy_true = [region[j, i] for i, j in zip(x_int, y_int)]
count = np.count_nonzero(xy_true)
return count
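# Illustrative use: with blob coordinates xs (columns) and ys (rows),
# get_count(xs, ys, cytoplasm, subregion=granules) counts the spots that fall inside the
# cytoplasm but outside the granule mask.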
def main():
ROOT = 'PATH'
ROOT_SEG = 'PATH' # Output from Fluffy
files_nd = glob.glob(f'{ROOT}/*Ars*.nd')
basenames = sorted([os.path.splitext(f)[0] for f in files_nd])
files = []
for basename in basenames:
files.append(glob.glob(f'{basename}*.stk'))
files_seg = sorted(glob.glob(f'{ROOT_SEG}/*Ars*.tiff'))
images_seg = list(map(skimage.io.imread, files_seg))
rows_blobs = []
for i, file in enumerate(files):
image = read_files(file)
nucleus = nuclear_detection(image[0])
cytoplasm = cytoplasmic_segmentation(image[1], nucleus)
cell = nucleus + cytoplasm
granules = images_seg[i] > 0
spots = blob_detection(image[2])
for spot in spots:
y = spot[0]  # blob_detection returns (row, column, sigma): spot[0] is the row (y)
x = spot[1]  # spot[1] is the column (x), matching the (x, y) order expected by get_value
row_blob = {
# General information
'file': file[0],
# Cellular measures
'cell': get_value(x, y, cell),
'nuclear': get_value(x, y, nucleus, threshold=0),
'granular': get_value(x, y, granules),
'granule': get_value(x, y, image[1]),
# Blob measures
'coord_x': x,
'coord_y': y,
}
rows_blobs.append(pd.Series(row_blob))
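# Possible continuation (illustrative; the output file name is an assumption): gather the
# per-blob rows into a table for analysis, e.g.
# df_blobs = pd.DataFrame(rows_blobs)
# df_blobs.to_csv('blob_measurements.csv', index=False)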
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pyarrow.parquet import ParquetFile
from kartothek.serialization import (
CsvSerializer,
DataFrameSerializer,
ParquetSerializer,
default_serializer,
)
from kartothek.serialization._util import ensure_unicode_string_type
TYPE_STABLE_SERIALISERS = [ParquetSerializer()]
SERLIALISERS = TYPE_STABLE_SERIALISERS + [
CsvSerializer(),
CsvSerializer(compress=False),
default_serializer(),
]
type_stable_serialisers = pytest.mark.parametrize("serialiser", TYPE_STABLE_SERIALISERS)
predicate_serialisers = pytest.mark.parametrize(
"serialiser",
[
ParquetSerializer(chunk_size=1),
ParquetSerializer(chunk_size=2),
ParquetSerializer(chunk_size=4),
]
+ SERLIALISERS,
)
def test_load_df_from_store_unsupported_format(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, "test.unknown")
def test_store_df_to_store(store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
dataframe_format = default_serializer()
assert isinstance(dataframe_format, ParquetSerializer)
key = dataframe_format.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_store_table_to_store(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
table = pa.Table.from_pandas(df)
key = serialiser.store(store, "prefix", table)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip(serialiser, store):
if serialiser in TYPE_STABLE_SERIALISERS:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], b"d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
else:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
# Test that all serialisers can ingest predicate_pushdown_to_io
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["a", "c"], predicate_pushdown_to_io=False
),
df[["a", "c"]],
)
# Test that all serialisers can deal with categories
expected = df[["c", "d"]].copy()
expected["c"] = expected["c"].astype("category")
# Check that the dtypes match but don't care about the order of the categoricals.
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["c", "d"], categories=["c"]
),
expected,
check_categorical=False,
)
# Test restore w/ empty col list
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=[]), df[[]]
)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_missing_column(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]})
key = serialiser.store(store, "prefix", df)
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "x"])
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_empty(serialiser, store):
df = pd.DataFrame({})
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_no_rows(serialiser, store):
df = pd.DataFrame({"a": [], "b": [], "c": []})
# Databricks notebook source
# MAGIC %md
# MAGIC # Putting it all together: Managing the Machine Learning Lifecycle
# MAGIC
# MAGIC Create a workflow that includes pre-processing logic, the optimal ML algorithm and hyperparameters, and post-processing logic.
# MAGIC
# MAGIC ## Instructions
# MAGIC
# MAGIC In this course, we've primarily used Random Forest in `sklearn` to model the Airbnb dataset. In this exercise, perform the following tasks:
# MAGIC <br><br>
# MAGIC 0. Create custom pre-processing logic to featurize the data
# MAGIC 0. Try a number of different algorithms and hyperparameters. Choose the most performant solution
# MAGIC 0. Create related post-processing logic
# MAGIC 0. Package the results and execute it as its own run
# MAGIC
# MAGIC ## Prerequisites
# MAGIC - Web browser: Chrome
# MAGIC - A cluster configured with **8 cores** and **DBR 7.0 ML**
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Setup
# MAGIC
# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>
# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# Adust our working directory from what DBFS sees to what python actually sees
working_path = workingDir.replace("dbfs:", "/dbfs")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Pre-processing
# MAGIC
# MAGIC Take a look at the dataset and notice that there are plenty of strings and `NaN` values present. Our end goal is to train a sklearn regression model to predict the price of an airbnb listing.
# MAGIC
# MAGIC
# MAGIC Before we can start training, we need to pre-process our data to be compatible with sklearn models by making all features purely numerical.
# COMMAND ----------
import pandas as pd
airbnbDF = spark.read.parquet("/mnt/training/airbnb/sf-listings/sf-listings-correct-types.parquet").toPandas()
display(airbnbDF)
# COMMAND ----------
# MAGIC %md
# MAGIC In the following cells we will walk you through the most basic pre-processing step necessary. Feel free to add additional steps afterwards to improve your model performance.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC First, convert the `price` from a string to a float since the regression model will be predicting numerical values.
# COMMAND ----------
# TODO
import numpy as np
airbnbDF["price"] = airbnbDF["price"].str.replace("$", "", regex=True)
airbnbDF["price"] = airbnbDF["price"].str.replace(",", "", regex=True)
airbnbDF["price"] = airbnbDF.price.astype('float32')
print(airbnbDF["price"])
# airbnbDF["price"] = airbnbDF["price"].str.replace('$', '')
# COMMAND ----------
# MAGIC %md
# MAGIC Take a look at our remaining columns with strings (or numbers) and decide if you would like to keep them as features or not.
# MAGIC
# MAGIC Remove the features you decide not to keep.
# COMMAND ----------
# TODO
airbnbDF["trunc_lat"] = airbnbDF.latitude.round(decimals=2)
airbnbDF["trunc_long"] = airbnbDF.longitude.round(decimals=2)
airbnbDF["review_scores_sum"] = airbnbDF[['review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value']].mean(axis=1)
airbnbDF = airbnbDF.drop(["latitude", "longitude", 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value', "neighbourhood_cleansed", "property_type", "zipcode"], axis=1)
# COMMAND ----------
airbnbDF.columns
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC For the string columns that you've decided to keep, pick a numerical encoding for the string columns. Don't forget to deal with the `NaN` entries in those columns first.
# COMMAND ----------
# TODO
from sklearn.impute import SimpleImputer
airbnbDF["host_is_superhost"] = airbnbDF["host_is_superhost"].str.replace("t", "0", regex=True)
airbnbDF["host_is_superhost"] = airbnbDF["host_is_superhost"].str.replace("f", "1", regex=True)
airbnbDF["instant_bookable"] = airbnbDF["instant_bookable"].str.replace("t", "0", regex=True)
airbnbDF["instant_bookable"] = airbnbDF["instant_bookable"].str.replace("f", "1", regex=True)
# airbnbDF["host_is_superhost"] = airbnbDF.host_is_superhost.astype(int)
# airbnbDF["instant_bookable"] = airbnbDF["instant_bookable"].astype(int)
airbnbDF["host_is_superhost"] = pd.to_numeric(airbnbDF["host_is_superhost"])
airbnbDF["instant_bookable"] = | pd.to_numeric(airbnbDF["instant_bookable"]) | pandas.to_numeric |
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.utils import Sequence
import numpy as np
import pandas as pd
from PIL import Image
import os
from os import path
import shutil
import random
def log_experiment(ts, args):
"""Utility function for logging the experiment
Args:
ts (String): Timestamp string that identifies the experiment run
args (Object): Arguments passed while executing the file
Returns:
(Bool): Returns True if a new experiment is being performed, False if otherwise
"""
EXPERIMENTS_LOG_CSV_PATH = path.join(
os.getcwd(), 'src', 'train', 'experiments.csv')
experiment_csv_df = pd.read_csv(
EXPERIMENTS_LOG_CSV_PATH).drop('timestamp', axis='columns')
experiment_present = experiment_csv_df.isin({
'batch_size': [args.batch_size],
'epochs': [args.epochs],
'learning_rate': [args.lr],
'dropout_rate': [args.dropout_rate],
'n_filters': [args.n_filters],
'unet_block_type': [args.unet_block_type],
'unet_skip_conn_type': [args.unet_skip_conn_type]
}).all(axis=1).any()
if experiment_present:
return False
# Create a DataFrame for the new row
new_row = [
[ts, args.batch_size, args.epochs, args.lr, args.dropout_rate,
args.n_filters, args.unet_block_type, args.unet_skip_conn_type]
]
new_row_df = pd.DataFrame(new_row)
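# Possible continuation (illustrative): append the new row to experiments.csv and report that a
# new experiment was logged, e.g.
# new_row_df.to_csv(EXPERIMENTS_LOG_CSV_PATH, mode='a', header=False, index=False)
# return True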
# This script analyzes the csv files output by PixDistStats2.py
# Updated Feb 2021.
# PixDistStats2.py separates the data into biological replicates instead of aggregating
# all data for each sample group, and experiments with plots.
# This script (PixDistStats3.py) takes those data, normalizes them to the total pixel
# count for each animal, runs statistical comparisons, and makes plots.
# For reference:
# pixel_distance.py performs the measurement of minimum distance between tumor and
# Lyve-1 pixels, and outputs the results for each image.
# PixDistStats.py does stats and plots on ALL the data separated by sample group, which is
# insufficient because it is not split into biological replicates and not normalized.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pixel_distance as pxd
import pandas as pd
from scipy import stats
from statsmodels.stats.multicomp import pairwise_tukeyhsd, MultiComparison
import joypy as jpy
def load_datas(dir):
distbypercentiles = | pd.read_csv(dir + 'dist_by_percentiles.csv', index_col='percentiles') | pandas.read_csv |
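# A minimal sketch (not part of the original script) of the normalization step described in
# the header comment: convert each animal's distance counts into fractions of that animal's
# total pixel count. The column names 'animal' and 'count' are assumptions about the CSV layout.
import pandas as pd
def normalize_per_animal(df, animal_col='animal', count_col='count'):
    """Return a copy of df with counts expressed as fractions of each animal's total."""
    totals = df.groupby(animal_col)[count_col].transform('sum')
    out = df.copy()
    out[count_col + '_frac'] = out[count_col] / totals
    return out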
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = | Period(freq='Q', year=2007, quarter=1) | pandas.tseries.period.Period |