prompt (string, lengths 19-1.03M) | completion (string, lengths 4-2.12k) | api (string, lengths 8-90)
---|---|---|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the utilities within the `calibration.dataframe_utilities`
module.
"""
import unittest
import iris
import numpy as np
import pandas as pd
import pytest
from improver.calibration.dataframe_utilities import (
forecast_and_truth_dataframes_to_cubes,
forecast_dataframe_to_cube,
truth_dataframe_to_cube,
)
from improver.metadata.constants.time_types import TIME_COORDS
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver_tests import ImproverTest
def _chunker(seq, size):
"""Helper function to iterate through a sequence in chunks.
Args:
seq:
The sequence to be chunked.
size:
The size of the chunks.
Return:
A sequence split into chunks.
"""
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
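# Illustrative note (added; not part of the original test module): the generator
# yields consecutive slices of length `size`, with a shorter final chunk when the
# length is not an exact multiple, e.g.
#     list(_chunker([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]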
class SetupSharedDataFrames(ImproverTest):
"""A shared dataframe creation class."""
def setUp(self):
"""Set-up forecast and truth dataframes."""
pytest.importorskip("pandas")
data = np.array(
[5.2, 0.3, 20.4, 6.5, 3.1, 21.5, 7.2, 4.2, 24.3], dtype=np.float32
)
self.forecast_data = np.tile(data, 3)
self.frt1 = pd.Timestamp("2017-07-20T12:00:00", tz="UTC")
self.frt2 = pd.Timestamp("2017-07-21T12:00:00", tz="UTC")
self.frt3 = pd.Timestamp("2017-07-22T12:00:00", tz="UTC")
self.fp = pd.Timedelta(6 * 3600, unit="s")
self.time1 = pd.Timestamp("2017-07-20T18:00:00", tz="UTC")
self.time2 = pd.Timestamp("2017-07-21T18:00:00", tz="UTC")
self.time3 = | pd.Timestamp("2017-07-22T18:00:00", tz="UTC") | pandas.Timestamp |
#!/usr/bin/env python
"""
Requirements:
* Python >= 3.6.2
* Pandas
* NumPy
Copyright (c) 2020 <NAME> <<EMAIL>>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"1",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import argparse
import math
import os, sys
import pandas as pd
import numpy as np
if __name__ == "__main__":
usage = __doc__.split("\n\n\n")
parser = argparse.ArgumentParser(description="Add CCF to neoepitopes")
parser.add_argument("--neoepitopes", required=True, help="neoepitope file")
parser.add_argument("--ccf", required=True, help="CCF file")
parser.add_argument("--outfile", required=True, help="Output file")
args = parser.parse_args()
neoepitope_reader = pd.read_csv(args.neoepitopes, sep="\t")
neoepitope_df = | pd.DataFrame(neoepitope_reader) | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import os
import datetime
from datetime import date
import time
import random
import string
def crawling(date, product_id, type_, product_name, proxy=None):
start_time = time.time()
headers = {
'authority': 'www.appannie.com',
'accept': 'text/plain, */*; q=0.01',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
'sec-gpc': '1',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.appannie.com/headless/udw/apps/ios/app/'+product_id+'/app-ranking/?app_slug='+product_id+'&market_slug=ios&headless=yes&device=iphone&type='+type_+'&date='+date,
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
'cookie': 'csrftoken=<KEY>; aa_language=en; django_language=en; sessionId=".eJxVjb9Lw0AYQGNaU4n4q-isoy7BxlrTtZ1UXIoHtx1f7r62R-Mll-9OqSA4if6HgrN_hJspguL2ePB4z-GTXTvme0EQEBLp0lRYkyaHxr3yrUYL8G4uPGEttLr6ej8M2EYBZuZhhiyUhu_8tQIN5AWq65C3GwukFT9qoAfDocrOBlPsXfRlhtkgzVSWnk_TFBX2kW26koSvFDhUNnzj3f_jHOQCjeKnjX7AHAwUS6clJSBl6Y1LxkB4aQgNaafv8aZUWIx-IrYLBdZOyDnKhXD6DuVqs4L4F2yLxZ3Pzna034oOPmS1dI-xYLfj2LZPJnb9ZWIjn3wD8alh3A:1m3YFX:31ZUhwxGxIT6onl0fujeE9Lu_wg";',
}
params = (
('type', type_),
('device', 'iphone'),
('date', date),
)
response = requests.get('https://www.appannie.com/apps/ios/app/'+product_name+'/ranking_table/', headers=headers, params=params, proxies={'http':proxy, 'https':proxy})
if response.status_code != 200:
print('Error')
return None
print(proxy, response, date, 'Success!', round(time.time()-start_time, 2))
return response.content
def format_0(titles, stores, ranks, date):
store_names = []
store_nums = []
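# Added clarification (inferred from the parsing below): the first six "stores"
# rows are treated as store names, while later rows hold numeric rank data in
# which "▲n" / "▼n" encode a rank change of +n / -n and "=" means no change.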
for i, store in enumerate(stores):
if i < 6:
store_names.append(store.text)
else:
ls = store.text.split()
to_add = []
for i, val in enumerate(ls):
if '▼' in val:
to_add.append(int('-' + val[1:]))
elif '▲' in val:
to_add.append(int(val[1:]))
elif val == '=':
to_add.append(0)
elif val == '0':
try:
to_add.append(int(val))
if '▼' in ls[i+1] or '▲' in ls[i+1] or '=' in ls[i+1]:
pass
else:
to_add.append(0)
except:
to_add.append(0)
else:
try:
to_add.append(int(val))
except:
to_add.append(float('NaN'))
store_nums.append(to_add)
store_titles = []
for title in titles:
store_titles.append(title.text)
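# Added clarification: the comprehension below groups each store's numbers into
# consecutive (rank, change) pairs and keys them by the column titles.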
store_nums = [dict(zip(store_titles, [nums[i:i+2] for i in range(0, len(nums), 2)])) for nums in store_nums]
stores_ls = list(zip(store_names, store_nums))
rank_names = []
rank_nums = []
for i, rank in enumerate(ranks):
if len(rank.find_all('a')) > 0:
rank_names.append(rank.text.strip())
else:
ls = rank.text.split()
to_add = []
for i, val in enumerate(ls):
if '▼' in val:
to_add.append(int('-' + val[1:]))
elif '▲' in val:
to_add.append(int(val[1:]))
elif val == '=':
to_add.append(0)
elif val == '-':
to_add.append(float('NaN'))
to_add.append(float('NaN'))
else:
try:
to_add.append(int(val))
except:
to_add.append(float('NaN'))
rank_nums.append(to_add)
rank_nums = [dict(zip(store_titles, [nums[i:i+2] for i in range(0, len(nums), 2)])) for nums in rank_nums]
ranks_ls = list(zip(rank_names, rank_nums))
final_ls = stores_ls + ranks_ls
final_d = {date.strftime('%Y-%m-%d') : final_ls}
final_df = pd.DataFrame(final_d)
return final_df
def add():
df = pd.DataFrame()
product_id = input("product ID: ")
product_name = input("product name: ")
desired_name = input("desired name: ")
type_ = input("ranks/grossing-ranks: ")
today = (date.today() - datetime.timedelta(days=2)).strftime('%Y-%m-%d')
line = product_id + " " + product_name + " " + desired_name + " " + type_
with open('id_name.txt') as f:
lines = f.read().splitlines()
if line in lines:
print("File already exists")
return None
start = date.today() - datetime.timedelta(days=89)
end = date.today() - datetime.timedelta(days=2)
print(f"from {start} to {end}")
date_interval = pd.date_range(start = start, end = end, freq="d")
for date_ in date_interval:
start_time = time.time()
content = crawling(date_.strftime('%Y-%m-%d'), product_id, type_, product_name)
if content is None:
print("Please go to check the box to prove that you are a human...")
break
soup = BeautifulSoup(content, 'html.parser')
titles = soup.find_all('th', class_='rank')
stores = soup.find_all('tr', class_='stores')
ranks = soup.find_all('tr', class_='ranks')
final_df = format_0(titles, stores, ranks, date_)
df = pd.concat([df, final_df], axis=1)
print('Done!')
df.to_csv(f'data pool/{desired_name}_{type_}.csv')
with open('id_name.txt') as f:
lines = f.read().splitlines()
with open('id_name.txt', 'a') as f:
if line not in lines:
f.write(line + '\n')
def check_update():
path = 'data pool'
for file in os.listdir(path):
if '.csv' in file:
df = | pd.read_csv(path+'/'+file) | pandas.read_csv |
import gc
import re
from functools import partial
import numpy as np
import pandas as pd
import plotly.express as px
import torch
from lime.lime_text import LimeTextExplainer
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
from tqdm.notebook import tqdm
def get_prefix(word_relevance_df, el, side: str):
assert side in ['left', 'right']
word_relevance_el = word_relevance_df.copy().reset_index(drop=True)
mapper = Mapper([x for x in el.columns if x.startswith(side + '_') and x != side + '_id'], r' ')
available_prefixes = mapper.encode_attr(el).split()
assigned_pref = []
word_prefixes = []
attr_to_code = {v: k for k, v in mapper.attr_map.items()}
for i in range(word_relevance_el.shape[0]):
word = str(word_relevance_el.loc[i, side + '_word'])
if word == '[UNP]':
word_prefixes.append('[UNP]')
else:
col = word_relevance_el.loc[i, side + '_attribute']
col_code = attr_to_code[side + '_' + col]
turn_prefixes = [x for x in available_prefixes if x[0] == col_code]
idx = 0
while idx < len(turn_prefixes) and word != turn_prefixes[idx][4:]:
idx += 1
if idx < len(turn_prefixes):
tmp = turn_prefixes[idx]
del turn_prefixes[idx]
word_prefixes.append(tmp)
assigned_pref.append(tmp)
else:
idx = 0
while idx < len(assigned_pref) and word != assigned_pref[idx][4:]:
idx += 1
if idx < len(assigned_pref):
word_prefixes.append(assigned_pref[idx])
else:
assert False, word
return word_prefixes
def append_prefix(word_relevance, df, decision_unit_view=False, exclude_attrs=['id', 'left_id', 'right_id', 'label']):
ids = word_relevance['id'].unique()
res_df = []
for id in ids:
el = df[df.id == id]
word_relevance_el = word_relevance[word_relevance.id == id]
if decision_unit_view is True:
word_relevance_el['left_word_prefixes'] = get_prefix(word_relevance_el, el, 'left')
word_relevance_el['right_word_prefixes'] = get_prefix(word_relevance_el, el, 'right')
res_df.append(word_relevance_el.copy())
res_df = pd.concat(res_df)
if decision_unit_view is True:
mapper = Mapper(df.loc[:, np.setdiff1d(df.columns, exclude_attrs)], r' ')
assert len(mapper.attr_map.keys()) % 2 == 0, 'The attributes must be the same for the two sources.'
shift = int(len(mapper.attr_map.keys()) / 2)
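# Added clarification (inferred): attribute codes are shared between the left and
# right sides, so shifting the right-side code letter by half the number of codes
# keeps right word prefixes from colliding with left ones.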
res_df['right_word_prefixes'] = res_df['right_word_prefixes'].apply(
lambda x: chr(ord(x[0]) + shift) + x[1:] if x != '[UNP]' else x)
return res_df
def evaluate_df(word_relevance, df_to_process, predictor, exclude_attrs=['id', 'left_id', 'right_id', 'label'],
score_col='pred'):
print(f'Testing unit remotion with -- {score_col}')
assert df_to_process.shape[
0] > 0, f'DataFrame to evaluate must have some elements. Passed df has shape {df_to_process.shape[0]}'
evaluation_df = df_to_process.copy().replace(pd.NA, '')
word_relevance_prefix = append_prefix(word_relevance, evaluation_df)
if score_col == 'pred':
word_relevance_prefix['impact'] = word_relevance_prefix[score_col] - 0.5
else:
word_relevance_prefix['impact'] = word_relevance_prefix[score_col]
word_relevance_prefix['conf'] = 'bert'
res_list = []
for side in ['left', 'right']:
evaluation_df['pred'] = predictor(evaluation_df)
side_word_relevance_prefix = word_relevance_prefix.copy()
side_word_relevance_prefix['word_prefix'] = side_word_relevance_prefix[side + '_word_prefixes']
side_word_relevance_prefix = side_word_relevance_prefix.query(f'{side}_word != "[UNP]"')
ev = Evaluate_explanation(side_word_relevance_prefix, evaluation_df, predict_method=predictor,
exclude_attrs=exclude_attrs, percentage=.25, num_round=3)
fixed_side = 'right' if side == 'left' else 'left'
res_df = ev.evaluate_set(df_to_process.id.values, 'bert', variable_side=side, fixed_side=fixed_side,
utility=True)
res_list.append(res_df.copy())
return | pd.concat(res_list) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('ratings.csv', sep=',')
#print(df.head())
movie_titles = | pd.read_csv('movies.csv') | pandas.read_csv |
from tqdm import tqdm
import pandas as pd
import sys, os
import collections
"""
Small script to concat ENCODE files into a single dataframe to process it easily
5 cols = SRS sequencing
12 cols = LRS sequencing
"""
encode_dl_directory = "/gstock/biolo_datasets/ENCODE/DL/"
dict_df = collections.defaultdict(list)
for file in tqdm(os.listdir(encode_dl_directory)):
# print(file)
cols = open(encode_dl_directory + file, "r").readline().strip().split("\t")
cols_length = len(cols)
if cols_length == 5:
dict_df[cols_length].append(pd.read_csv(encode_dl_directory + file, sep="\t"))
elif cols_length == 12:
dict_df[cols_length].append(pd.read_csv(encode_dl_directory + file, sep="\t").drop([cols[-1]], axis=1))
if dict_df[5]:
pd.concat(dict_df[5]).to_csv(
"/gstock/biolo_datasets/ENCODE/ENCODE_SRS_concat.tsv.gz", compression="gzip", sep="\t", index=False
)
if dict_df[12]:
| pd.concat(dict_df[12]) | pandas.concat |
import sys
import argparse
import torch
import csv
import pandas as pd
from torchtext.data.functional import generate_sp_model
import params
from rcnn import RCNN
from train import *
from dataset import *
##data path
train_df_path = params.train_df
test_df_path = params.test_df
val_df_path = params.val_df
def train_sentencepiece(df_path):
"""
Train a SentencePiece model on the training and validation data.
df_path (str): path to the csv
"""
#clean the dataset
def clean(text):
"""
params:
text (str)
returns:
clean text
"""
text = text.lower()
letters = string.ascii_lowercase
not_letters = set([char_ for char_ in text if char_ not in letters and char_ != ' '])
for char in not_letters:
text = text.replace(char, " ")
return text
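# Added note: clean() lowercases the tweet and replaces every character that is
# not an ASCII letter or a space with a space before SentencePiece training.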
df = pd.read_csv(df_path)
df["tweet"] = df.tweet.apply(lambda x: clean(x))
with open('./sample.csv', 'w', newline='', encoding='utf-8') as f:
for x in df["tweet"].tolist():
f.write(x)
f.write("\n")
#train and it will save a model bames spm_use.model
generate_sp_model('./sample.csv',vocab_size=19184,model_prefix='output/spm_user')
def fetch_inference_tokens(text, sentencepiece):
"""
gets the token ids for a single sentence using either the sentencepiece-based text dataset or simple tokenization
params:
text (str): sentence for inference
sentencepiece (bool): whether to use sentencepiece or not
returns:
tensor input for the given sentence
"""
if sentencepiece:
x = inference_tokens_spm(text)
else:
x = inference_tokens(text)
return x
def get_dataloader(type_action, sentencepiece):
"""
loads the dataloader for the train, test and validation datasets
type_action (str): either train or test
sentencepiece (bool): whether to use sentencepiece or not
returns:
dataloader
"""
if type_action == "train":
# reads datafram
train_df = pd.read_csv(train_df_path)
val_df = pd.read_csv(val_df_path)
test_df = | pd.read_csv(test_df_path) | pandas.read_csv |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime, Double, Integer
import featuretools as ft
from featuretools import Timedelta
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import DirectFeature, IdentityFeature
from featuretools.primitives import (
And,
Count,
CumSum,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
LessThanEqualToScalar,
LessThanScalar,
Mean,
Min,
Mode,
Negate,
NMostCommon,
NotEqualScalar,
NumTrue,
Sum,
TimeSinceLast,
Trend
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.tests.testing_utils import backward_path, to_pandas
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library
def test_make_identity(es):
f = IdentityFeature(es['log'].ww['datetime'])
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == datetime(2011, 4, 9, 10, 30, 0))
def test_make_dfeat(es):
f = DirectFeature(ft.Feature(es['customers'].ww['age']),
child_dataframe_name='sessions')
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == 33)
def test_make_agg_feat_of_identity_column(es):
agg_feat = ft.Feature(es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Sum)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 50)
# full_dataframe not supported with Dask
def test_full_dataframe_trans_of_agg(pd_es):
agg_feat = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([1]))
v = df[trans_feat.get_name()].values[0]
assert v == 82
def test_full_dataframe_error_dask(dask_es):
agg_feat = ft.Feature(dask_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(dask_es,
time_last=None,
feature_set=feature_set)
error_text = "Cannot use primitives that require full dataframe with Dask"
with pytest.raises(ValueError, match=error_text):
calculator.run(np.array([1]))
def test_make_agg_feat_of_identity_index_column(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 5)
def test_make_agg_feat_where_count(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=IdentityFeature(es['log'].ww['product_id']) == 'coke zero',
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 3)
def test_make_agg_feat_using_prev_time(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
use_previous=Timedelta(10, 's'),
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 10),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 2)
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 1)
def test_make_agg_feat_using_prev_n_events(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed entitysets do not support use_previous')
agg_feat_1 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(1, 'observations'),
primitive=Min)
agg_feat_2 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(3, 'observations'),
primitive=Min)
assert agg_feat_1.get_name() != agg_feat_2.get_name(), \
'Features should have different names based on use_previous'
feature_set = FeatureSet([agg_feat_1, agg_feat_2])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 6),
feature_set=feature_set)
df = calculator.run(np.array([0]))
# time_last is included by default
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 5
assert v2 == 0
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = calculator.run(np.array([0]))
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 20
assert v2 == 10
def test_make_agg_feat_multiple_dtypes(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Currently no Dask or Koalas compatible agg prims that use multiple dtypes')
compare_prod = IdentityFeature(es['log'].ww['product_id']) == 'coke zero'
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Count)
agg_feat2 = ft.Feature(es['log'].ww['product_id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Mode)
feature_set = FeatureSet([agg_feat, agg_feat2])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
v = df[agg_feat.get_name()][0]
v2 = df[agg_feat2.get_name()][0]
assert (v == 3)
assert (v2 == 'coke zero')
def test_make_agg_feat_where_different_identity_feat(es):
feats = []
where_cmps = [LessThanScalar, GreaterThanScalar, LessThanEqualToScalar,
GreaterThanEqualToScalar, EqualScalar, NotEqualScalar]
for where_cmp in where_cmps:
feats.append(ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=ft.Feature(es['log'].ww['datetime'], primitive=where_cmp(datetime(2011, 4, 10, 10, 40, 1))),
primitive=Count))
df = ft.calculate_feature_matrix(entityset=es, features=feats, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, where_cmp in enumerate(where_cmps):
name = feats[i].get_name()
instances = df[name]
v0, v1, v2, v3 = instances[0:4]
if where_cmp == LessThanScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
elif where_cmp == GreaterThanScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 0)
elif where_cmp == LessThanEqualToScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 2)
elif where_cmp == GreaterThanEqualToScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == EqualScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == NotEqualScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
def test_make_agg_feat_of_grandchild_dataframe(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[agg_feat.get_name()].values[0]
assert (v == 10)
def test_make_agg_feat_where_count_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is greater than 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=log_count_feat > 1,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1 = instances[0:2]
assert (v0 == 2)
assert (v1 == 2)
def test_make_compare_feat(es):
"""
Feature we're creating is:
Whether each session has more logs than the customer's
average number of logs per session
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
mean_agg_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Mean)
mean_feat = DirectFeature(mean_agg_feat, child_dataframe_name='sessions')
feat = log_count_feat > mean_feat
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1, v2 = instances[0:3]
assert v0
assert v1
assert not v2
def test_make_agg_feat_where_count_and_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session equals 1 and the device type equals 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat == 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
and_feat = ft.Feature([compare_count, compare_device_type], primitive=And)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=and_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 1)
def test_make_agg_feat_where_count_or_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is greater than 1 or the device type equals 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat > 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
or_feat = compare_count.OR(compare_device_type)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=or_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id', int_index=True)
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 3)
def test_make_agg_feat_of_agg_feat(es):
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
customer_sum_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Sum)
feature_set = FeatureSet([customer_sum_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[customer_sum_feat.get_name()].values[0]
assert (v == 10)
@pytest.fixture
def pd_df():
return pd.DataFrame({
"id": ["a", "b", "c", "d", "e"],
"e1": ["h", "h", "i", "i", "j"],
"e2": ["x", "x", "y", "y", "x"],
"e3": ["z", "z", "z", "z", "z"],
"val": [1, 1, 1, 1, 1]
})
@pytest.fixture
def dd_df(pd_df):
return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def ks_df(pd_df):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
return ks.from_pandas(pd_df)
@pytest.fixture(params=['pd_df', 'dd_df', 'ks_df'])
def df(request):
return request.getfixturevalue(request.param)
def test_make_3_stacked_agg_feats(df):
"""
Tests stacking 3 agg features.
The test specifically uses non numeric indices to test how ancestor columns are handled
as dataframes are merged together
"""
if isinstance(df, dd.DataFrame):
pytest.xfail('normalize_dataframe fails with dask DataFrame')
es = ft.EntitySet()
ltypes = {
'e1': Categorical,
'e2': Categorical,
'e3': Categorical,
'val': Double
}
es.add_dataframe(dataframe=df,
index="id",
dataframe_name="e0",
logical_types=ltypes)
es.normalize_dataframe(base_dataframe_name="e0",
new_dataframe_name="e1",
index="e1",
additional_columns=["e2", "e3"])
es.normalize_dataframe(base_dataframe_name="e1",
new_dataframe_name="e2",
index="e2",
additional_columns=["e3"])
es.normalize_dataframe(base_dataframe_name="e2",
new_dataframe_name="e3",
index="e3")
sum_1 = ft.Feature(es["e0"].ww["val"], parent_dataframe_name="e1", primitive=Sum)
sum_2 = ft.Feature(sum_1, parent_dataframe_name="e2", primitive=Sum)
sum_3 = ft.Feature(sum_2, parent_dataframe_name="e3", primitive=Sum)
feature_set = FeatureSet([sum_3])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(["z"]))
v = df[sum_3.get_name()][0]
assert (v == 5)
def test_make_dfeat_of_agg_feat_on_self(es):
"""
The graph looks like this:
R R = Regions, a parent of customers
|
C C = Customers, the dataframe we're trying to predict on
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on C.
"""
customer_count_feat = ft.Feature(es['customers'].ww['id'], parent_dataframe_name=u'régions', primitive=Count)
num_customers_feat = DirectFeature(customer_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_customers_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_customers_feat.get_name()].values[0]
assert (v == 3)
def test_make_dfeat_of_agg_feat_through_parent(es):
"""
The graph looks like this:
R C = Customers, the dataframe we're trying to predict on
/ \\ R = Regions, a parent of customers
S C S = Stores, a child of regions
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on S.
"""
store_id_feat = IdentityFeature(es['stores'].ww['id'])
store_count_feat = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)
num_stores_feat = DirectFeature(store_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_stores_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_stores_feat.get_name()].values[0]
assert (v == 3)
def test_make_deep_agg_feat_of_dfeat_of_agg_feat(es):
"""
The graph looks like this (higher implies parent):
C C = Customers, the dataframe we're trying to predict on
| S = Sessions, a child of Customers
P S L = Log, a child of both Sessions and Products
\\ / P = Products, a parent of Log which is not a descendant of customers
L
We're trying to calculate a DFeat from L to P on an agg_feat of P on L, and
then aggregate it with another agg_feat of C on L.
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='products', primitive=Count)
product_purchases_feat = DirectFeature(log_count_feat,
child_dataframe_name='log')
purchase_popularity = ft.Feature(product_purchases_feat, parent_dataframe_name='customers', primitive=Mean)
feature_set = FeatureSet([purchase_popularity])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[purchase_popularity.get_name()].values[0]
assert (v == 38.0 / 10.0)
def test_deep_agg_feat_chain(es):
"""
Agg feat of agg feat:
region.Mean(customer.Count(Log))
"""
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
region_avg_feat = ft.Feature(customer_count_feat, parent_dataframe_name=u'régions', primitive=Mean)
feature_set = FeatureSet([region_avg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(['United States']))
df = to_pandas(df, index='id')
v = df[region_avg_feat.get_name()][0]
assert (v == 17 / 3.)
# NMostCommon not supported with Dask or Koalas
def test_topn(pd_es):
topn = ft.Feature(pd_es['log'].ww['product_id'],
parent_dataframe_name='customers',
primitive=NMostCommon(n=2))
feature_set = FeatureSet([topn])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = pd.DataFrame([
['toothpaste', 'coke zero'],
['coke zero', 'Haribo sugar-free gummy bears'],
['taco clock', np.nan]
])
assert ([name in df.columns for name in topn.get_feature_names()])
for i in range(df.shape[0]):
true = true_results.loc[i]
actual = df.loc[i]
if i == 0:
# coke zero and toothpaste have the same number of occurrences
assert set(true.values) == set(actual.values)
else:
for i1, i2 in zip(true, actual):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
# Trend not supported with Dask or Koalas
def test_trend(pd_es):
trend = ft.Feature([ft.Feature(pd_es['log'].ww['value']), ft.Feature(pd_es['log'].ww['datetime'])],
parent_dataframe_name='customers',
primitive=Trend)
feature_set = FeatureSet([trend])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = [-0.812730, 4.870378, np.nan]
np.testing.assert_almost_equal(df[trend.get_name()].tolist(), true_results, decimal=5)
def test_direct_squared(es):
feature = IdentityFeature(es['log'].ww['value'])
squared = feature * feature
feature_set = FeatureSet([feature, squared])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0, 1, 2])))
for i, row in df.iterrows():
assert (row[0] * row[0]) == row[1]
def test_agg_empty_child(es):
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([customer_count_feat])
# time last before the customer had any events, so child frame is empty
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])), index='id')
assert df["COUNT(log)"].iloc[0] == 0
def test_diamond_entityset(diamond_es):
es = diamond_es
amount = ft.IdentityFeature(es['transactions'].ww['amount'])
path = backward_path(es, ['regions', 'customers', 'transactions'])
through_customers = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
path = backward_path(es, ['regions', 'stores', 'transactions'])
through_stores = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
feature_set = FeatureSet([through_customers, through_stores])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
assert (df['SUM(stores.transactions.amount)'] == [94, 261, 128]).all()
assert (df['SUM(customers.transactions.amount)'] == [72, 411, 0]).all()
def test_two_relationships_to_single_dataframe(games_es):
es = games_es
home_team, away_team = es.relationships
path = RelationshipPath([(False, home_team)])
mean_at_home = ft.AggregationFeature(ft.Feature(es['games'].ww['home_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
path = RelationshipPath([(False, away_team)])
mean_at_away = ft.AggregationFeature(ft.Feature(es['games'].ww['away_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
home_team_mean = ft.DirectFeature(mean_at_home, 'games',
relationship=home_team)
away_team_mean = ft.DirectFeature(mean_at_away, 'games',
relationship=away_team)
feature_set = FeatureSet([home_team_mean, away_team_mean])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 8, 28),
feature_set=feature_set)
df = calculator.run(np.array(range(3)))
df = to_pandas(df, index='id', sort_index=True)
assert (df[home_team_mean.get_name()] == [1.5, 1.5, 2.5]).all()
assert (df[away_team_mean.get_name()] == [1, 0.5, 2]).all()
@pytest.fixture
def pd_parent_child():
parent_df = pd.DataFrame({"id": [1]})
child_df = pd.DataFrame({"id": [1, 2, 3],
"parent_id": [1, 1, 1],
"time_index": pd.date_range(start='1/1/2018', periods=3),
"value": [10, 5, 2],
"cat": ['a', 'a', 'b']}).astype({'cat': 'category'})
return (parent_df, child_df)
@pytest.fixture
def dd_parent_child(pd_parent_child):
parent_df, child_df = pd_parent_child
parent_df = dd.from_pandas(parent_df, npartitions=2)
child_df = dd.from_pandas(child_df, npartitions=2)
return (parent_df, child_df)
@pytest.fixture
def ks_parent_child(pd_parent_child):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
parent_df, child_df = pd_parent_child
parent_df = ks.from_pandas(parent_df)
child_df = ks.from_pandas(child_df)
return (parent_df, child_df)
@pytest.fixture(params=['pd_parent_child', 'dd_parent_child', 'ks_parent_child'])
def parent_child(request):
return request.getfixturevalue(request.param)
def test_empty_child_dataframe(parent_child):
parent_df, child_df = parent_child
child_ltypes = {
'parent_id': Integer,
'time_index': Datetime,
'value': Double,
'cat': Categorical
}
es = ft.EntitySet(id="blah")
es.add_dataframe(dataframe_name="parent",
dataframe=parent_df,
index="id")
es.add_dataframe(dataframe_name="child",
dataframe=child_df,
index="id",
time_index="time_index",
logical_types=child_ltypes)
es.add_relationship("parent", "id", "child", "parent_id")
# create regular agg
count = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", primitive=Count)
# create agg feature that requires multiple arguments
trend = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww['time_index'])],
parent_dataframe_name="parent",
primitive=Trend)
# create multi-output agg feature
n_most_common = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", primitive=NMostCommon)
# create aggs with where
where = ft.Feature(es["child"].ww["value"]) == 1
count_where = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", where=where, primitive=Count)
trend_where = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww["time_index"])],
parent_dataframe_name="parent",
where=where,
primitive=Trend)
n_most_common_where = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", where=where, primitive=NMostCommon)
if isinstance(parent_df, pd.DataFrame):
features = [count, count_where, trend, trend_where, n_most_common, n_most_common_where]
data = {count.get_name(): | pd.Series([0], dtype="Int64") | pandas.Series |
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Timedelta
)
from woodwork.statistics_utils import (
_get_describe_dict,
_get_mode,
_make_categorical_for_mutual_info,
_replace_nans_for_mutual_info
)
from woodwork.tests.testing_utils import mi_between_cols, to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2]),
pd.Series([np.nan, np.nan, np.nan]),
pd.Series([pd.NA, pd.NA, pd.NA]),
pd.Series([1, 2, np.nan, 2, np.nan, 3, 2]),
pd.Series([1, 2, pd.NA, 2, pd.NA, 3, 2])
]
answer_list = [2, 'b', 2, None, None, 2, 2]
for series, answer in zip(series_list, answer_list):
mode = _get_mode(series)
if answer is None:
assert mode is None
else:
assert mode == answer
def test_accessor_replace_nans_for_mutual_info():
df_nans = pd.DataFrame({
'ints': pd.Series([2, pd.NA, 5, 2], dtype='Int64'),
'floats': pd.Series([3.3, None, 2.3, 1.3]),
'bools': pd.Series([True, None, True, False]),
'bools_pdna': pd.Series([True, pd.NA, True, False], dtype='boolean'),
'int_to_cat_nan': pd.Series([1, np.nan, 3, 1], dtype='category'),
'str': pd.Series(['test', np.nan, 'test2', 'test']),
'str_no_nan': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', None, '2020-01-02', '2020-01-03'])
})
df_nans.ww.init()
formatted_df = _replace_nans_for_mutual_info(df_nans.ww.schema, df_nans.copy())
assert isinstance(formatted_df, pd.DataFrame)
assert formatted_df['ints'].equals(pd.Series([2, 3, 5, 2], dtype='Int64'))
assert formatted_df['floats'].equals(pd.Series([3.3, 2.3, 2.3, 1.3], dtype='float'))
assert formatted_df['bools'].equals(pd.Series([True, True, True, False], dtype='category'))
assert formatted_df['bools_pdna'].equals(pd.Series([True, True, True, False], dtype='boolean'))
assert formatted_df['int_to_cat_nan'].equals(pd.Series([1, 1, 3, 1], dtype='category'))
assert formatted_df['str'].equals(pd.Series(['test', 'test', 'test2', 'test'], dtype='category'))
assert formatted_df['str_no_nan'].equals(pd.Series(['test', 'test2', 'test2', 'test'], dtype='category'))
assert formatted_df['dates'].equals(pd.Series(['2020-01-01', '2020-01-02', '2020-01-02', '2020-01-03'], dtype='datetime64[ns]'))
def test_accessor_make_categorical_for_mutual_info():
df = pd.DataFrame({
'ints1': pd.Series([1, 2, 3, 2]),
'ints2': pd.Series([1, 100, 1, 100]),
'ints3': pd.Series([1, 2, 3, 2], dtype='Int64'),
'bools': pd.Series([True, False, True, False]),
'booleans': pd.Series([True, False, True, False], dtype='boolean'),
'categories': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', '2019-01-02', '2020-08-03', '1997-01-04'])
})
df.ww.init()
formatted_num_bins_df = _make_categorical_for_mutual_info(df.ww.schema, df.copy(), num_bins=4)
assert isinstance(formatted_num_bins_df, pd.DataFrame)
assert formatted_num_bins_df['ints1'].equals(pd.Series([0, 1, 3, 1], dtype='int8'))
assert formatted_num_bins_df['ints2'].equals(pd.Series([0, 1, 0, 1], dtype='int8'))
assert formatted_num_bins_df['ints3'].equals(pd.Series([0, 1, 3, 1], dtype='int8'))
assert formatted_num_bins_df['bools'].equals(pd.Series([1, 0, 1, 0], dtype='int8'))
assert formatted_num_bins_df['booleans'].equals( | pd.Series([1, 0, 1, 0], dtype='int8') | pandas.Series |
##########################################################
# BASIC CLASSIFICATION FUNCTIONS #
##########################################################
# rcATT is a tool to predict tactics and techniques
# from the ATT&CK framework, using multilabel text
# classification and post processing.
# Version: 1.00
# Author: <NAME>
# Date: 2019_10_22
# Important global constants and functions for
# classifications: training and prediction.
import ast
import joblib
import configparser
import pandas as pd
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectPercentile
from nltk.corpus import stopwords
import classification_tools.preprocessing as prp
import classification_tools.postprocessing as pop
##########################################################
# LABELS AND DATAFRAME LISTS AND RELATIONSHIP #
##########################################################
config = configparser.ConfigParser()
config.read("classification_tools/rcatt.ini")
TEXT_FEATURES = config["VARIABLES"]["TEXT_FEATURES"].split(",")
CODE_TACTICS = config["VARIABLES"]["CODE_TACTICS"].split(",")
NAME_TACTICS = config["VARIABLES"]["NAME_TACTICS"].split(",")
CODE_TECHNIQUES = config["VARIABLES"]["CODE_TECHNIQUES"].split(",")
NAME_TECHNIQUES = config["VARIABLES"]["NAME_TECHNIQUES"].split(",")
STIX_IDENTIFIERS = config["VARIABLES"]["STIX_IDENTIFIERS"].split(",")
TACTICS_TECHNIQUES_RELATIONSHIP_DF = ast.literal_eval(config["VARIABLES"]["RELATIONSHIP"])
for k, v in TACTICS_TECHNIQUES_RELATIONSHIP_DF.items():
TACTICS_TECHNIQUES_RELATIONSHIP_DF[k] = pd.Series(v)
TACTICS_TECHNIQUES_RELATIONSHIP_DF = pd.DataFrame(TACTICS_TECHNIQUES_RELATIONSHIP_DF)
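# Added note: building the DataFrame from a dict of pandas Series lets tactics
# with different numbers of associated techniques coexist; shorter columns are
# padded with NaN by pandas' index alignment.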
ALL_TTPS = config["VARIABLES"]["ALL_TTPS"].split(",")
TRAINING_FILE = config["PATH"]["TRAINING_FILE"]
ADDED_FILE = config["PATH"]["ADDED_FILE"]
##########################################################
# RETRAIN AND PREDICT FUNCTIONS #
##########################################################
def train(cmd):
"""
Train again rcATT with a new dataset
"""
# stopwords with additional words found during the development
stop_words = stopwords.words("english")
new_stop_words = [
"'ll",
"'re",
"'ve",
"ha",
"wa",
"'d",
"'s",
"abov",
"ani",
"becaus",
"befor",
"could",
"doe",
"dure",
"might",
"must",
"n't",
"need",
"onc",
"onli",
"ourselv",
"sha",
"themselv",
"veri",
"whi",
"wo",
"would",
"yourselv",
]
stop_words.extend(new_stop_words)
# load all possible data
train_data_df = pd.read_csv(TRAINING_FILE, encoding="ISO-8859-1")
train_data_added = pd.read_csv(ADDED_FILE, encoding="ISO-8859-1")
train_data_df = train_data_df.append(train_data_added, ignore_index=True)
train_data_df = prp.processing(train_data_df)
reports = train_data_df[TEXT_FEATURES]
tactics = train_data_df[CODE_TACTICS]
techniques = train_data_df[CODE_TECHNIQUES]
if cmd:
pop.print_progress_bar(0)
# Define a pipeline combining a text feature extractor with multi label classifier for tactics prediction
pipeline_tactics = Pipeline(
[
("columnselector", prp.TextSelector(key="processed")),
(
"tfidf",
TfidfVectorizer(
tokenizer=prp.LemmaTokenizer(), stop_words=stop_words, max_df=0.90
),
),
("selection", SelectPercentile(chi2, percentile=50)),
(
"classifier",
OneVsRestClassifier(
LinearSVC(
penalty="l2",
loss="squared_hinge",
dual=True,
class_weight="balanced",
),
n_jobs=1,
),
),
]
)
# train the model for tactics
pipeline_tactics.fit(reports, tactics)
if cmd:
pop.print_progress_bar(2)
# Define a pipeline combining a text feature extractor with multi label classifier for techniques prediction
pipeline_techniques = Pipeline(
[
("columnselector", prp.TextSelector(key="processed")),
(
"tfidf",
TfidfVectorizer(
tokenizer=prp.StemTokenizer(),
stop_words=stop_words,
min_df=2,
max_df=0.99,
),
),
("selection", SelectPercentile(chi2, percentile=50)),
(
"classifier",
OneVsRestClassifier(
LinearSVC(
penalty="l2",
loss="squared_hinge",
dual=False,
max_iter=1000,
class_weight="balanced",
),
n_jobs=1,
),
),
]
)
# train the model for techniques
pipeline_techniques.fit(reports, techniques)
if cmd:
pop.print_progress_bar(4)
pop.find_best_post_processing(cmd)
# Save model
joblib.dump(pipeline_tactics, "classification_tools/data/pipeline_tactics.joblib")
joblib.dump(
pipeline_techniques, "classification_tools/data/pipeline_techniques.joblib"
)
def predict(report_to_predict, post_processing_parameters):
"""
Predict tactics and techniques from a report in a txt file.
"""
# loading the models
pipeline_tactics = joblib.load("classification_tools/data/pipeline_tactics.joblib")
pipeline_techniques = joblib.load(
"classification_tools/data/pipeline_techniques.joblib"
)
report = prp.processing( | pd.DataFrame([report_to_predict], columns=["Text"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from bffs.bf import BF
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
import warnings
warnings.filterwarnings('ignore')
def name(num):
if num == 0:
return 'Setosa'
elif num == 1:
return 'Versicolour'
else:
return 'Virginica'
if __name__ == "__main__":
selector = BF(verbose=True)
data = datasets.load_iris()
dataX = | pd.DataFrame(data=data.data,columns=data.feature_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import piso
import piso.graph as piso_graph
from piso import register_accessors
register_accessors()
def get_accessor_method(self, function):
return {
piso_graph.adjacency_matrix: self.piso.adjacency_matrix,
}[function]
def get_package_method(function):
return {
piso_graph.adjacency_matrix: piso.adjacency_matrix,
}[function]
def perform_op(*args, how, function, **kwargs):
# how = "supplied, accessor, or package"
if how == "accessor":
self, *args = args
return get_accessor_method(self, function)(*args, **kwargs)
elif how == "package":
return get_package_method(function)(*args, **kwargs)
else:
return function(*args, **kwargs)
def map_to_dates(obj, date_type):
def make_date(x):
ts = pd.to_datetime(x, unit="d", origin="2021-09-30")
if date_type == "numpy":
return ts.to_numpy()
if date_type == "datetime":
return ts.to_pydatetime()
if date_type == "timedelta":
return ts - pd.Timestamp("2021-10-1")
return ts
if isinstance(obj, (pd.IntervalIndex, pd.arrays.IntervalArray)):
return obj.from_arrays(
obj.left.map(make_date),
obj.right.map(make_date),
obj.closed,
)
elif isinstance(obj, list):
return [make_date(x) for x in obj]
@pytest.mark.parametrize(
"closed",
["left", "right", "neither"],
)
@pytest.mark.parametrize(
"interval_index",
[True, False],
)
@pytest.mark.parametrize(
"include_index",
[True, False],
)
@pytest.mark.parametrize(
"date_type",
["timestamp", "numpy", "datetime", "timedelta", None],
)
@pytest.mark.parametrize(
"how",
["supplied", "accessor", "package"],
)
def test_adjacency_matrix_intersects_1(
closed, interval_index, include_index, date_type, how
):
interval_array = pd.arrays.IntervalArray.from_tuples(
[(0, 4), (3, 6), (5, 7), (8, 9), (9, 10)],
closed=closed,
)
if interval_index:
interval_array = pd.IntervalIndex(interval_array)
if date_type:
interval_array = map_to_dates(interval_array, date_type)
expected = np.array(
[
[False, True, False, False, False],
[True, False, True, False, False],
[False, True, False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
)
result = perform_op(
interval_array,
how=how,
function=piso_graph.adjacency_matrix,
edges="intersect",
include_index=include_index,
)
if include_index:
expected = pd.DataFrame(expected, columns=interval_array, index=interval_array)
pd.testing.assert_frame_equal(result, expected)
else:
assert np.array_equal(result, expected)
@pytest.mark.parametrize(
"interval_index",
[True, False],
)
@pytest.mark.parametrize(
"include_index",
[True, False],
)
@pytest.mark.parametrize(
"date_type",
["timestamp", "numpy", "datetime", "timedelta", None],
)
@pytest.mark.parametrize(
"how",
["supplied", "accessor", "package"],
)
def test_adjacency_matrix_intersects_2(interval_index, include_index, date_type, how):
interval_array = pd.arrays.IntervalArray.from_tuples(
[(0, 4), (3, 6), (5, 7), (8, 9), (9, 10)],
closed="both",
)
if interval_index:
interval_array = pd.IntervalIndex(interval_array)
if date_type:
interval_array = map_to_dates(interval_array, date_type)
expected = np.array(
[
[False, True, False, False, False],
[True, False, True, False, False],
[False, True, False, False, False],
[False, False, False, False, True],
[False, False, False, True, False],
]
)
result = perform_op(
interval_array,
how=how,
function=piso_graph.adjacency_matrix,
edges="intersect",
include_index=include_index,
)
if include_index:
expected = pd.DataFrame(expected, columns=interval_array, index=interval_array)
pd.testing.assert_frame_equal(result, expected)
else:
assert np.array_equal(result, expected)
@pytest.mark.parametrize(
"closed",
["left", "right", "neither"],
)
@pytest.mark.parametrize(
"interval_index",
[True, False],
)
@pytest.mark.parametrize(
"include_index",
[True, False],
)
@pytest.mark.parametrize(
"date_type",
["timestamp", "numpy", "datetime", "timedelta", None],
)
@pytest.mark.parametrize(
"how",
["supplied", "accessor", "package"],
)
def test_adjacency_matrix_disjoint_1(
closed, interval_index, include_index, date_type, how
):
interval_array = pd.arrays.IntervalArray.from_tuples(
[(0, 4), (3, 6), (5, 7), (8, 9), (9, 10)],
closed=closed,
)
if interval_index:
interval_array = pd.IntervalIndex(interval_array)
if date_type:
interval_array = map_to_dates(interval_array, date_type)
expected = np.array(
[
[False, False, True, True, True],
[False, False, False, True, True],
[True, False, False, True, True],
[True, True, True, False, True],
[True, True, True, True, False],
]
)
result = perform_op(
interval_array,
how=how,
function=piso_graph.adjacency_matrix,
edges="disjoint",
include_index=include_index,
)
if include_index:
expected = pd.DataFrame(expected, columns=interval_array, index=interval_array)
pd.testing.assert_frame_equal(result, expected)
else:
assert np.array_equal(result, expected)
@pytest.mark.parametrize(
"interval_index",
[True, False],
)
@pytest.mark.parametrize(
"include_index",
[True, False],
)
@pytest.mark.parametrize(
"date_type",
["timestamp", "numpy", "datetime", "timedelta", None],
)
@pytest.mark.parametrize(
"how",
["supplied", "accessor", "package"],
)
def test_adjacency_matrix_disjoint_2(interval_index, include_index, date_type, how):
interval_array = pd.arrays.IntervalArray.from_tuples(
[(0, 4), (3, 6), (5, 7), (8, 9), (9, 10)],
closed="both",
)
if interval_index:
interval_array = | pd.IntervalIndex(interval_array) | pandas.IntervalIndex |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
<NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
Automatically select the local density threshold so that the average number of
neighbors is 1-2 percent of all nodes.
:return: dc that local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
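# Added note: the loop above bisects dc so that the fraction of pairwise
# distances smaller than dc falls within [0.01, 0.02], stopping once the search
# interval shrinks below 1e-4.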
def __select_dc(self):
"""
Select the local density threshold: by default use the percentile-based method
from the paper; 'auto' triggers the automatic selection.
:return: dc that local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector indexed by point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
If the distance between two atoms is less than the cutoff distance,
return 1, otherwise return 0
:param dij: distance between atoms i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
def __min_neighbor_and_distance(self):
"""
Compute all points' min util to the higher local density point(which is
the nearest neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the nearest neighbor of the nearest neighbor
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
        Natural neighbor search: repeatedly enlarge the neighborhood size r,
        maintaining every point's k-nearest neighbors (knn) and reverse
        neighbors (rnn), until the number of points that still have no reverse
        neighbor stops changing. Each point's natural neighbors (nan) are then
        the intersection of its knn and rnn sets.
        :return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
        ENANE filtering step: train a self-training classifier (KNN base
        estimator with ``r`` neighbors) on the currently labelled samples plus
        the unlabelled samples in ``fx``, predict labels for the unlabelled
        ones, and accept a prediction only when at least as many of that
        sample's natural neighbors agree with it as disagree.
        :param fx: indexes of the unlabelled samples to evaluate
        :param nan: natural neighbors of every sample
        :param r: neighborhood size used by the KNN base estimator
        :return: the indexes accepted for labelling and their predicted labels.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
It takes in the lower and upper bounds of the data, and the data itself,
and then calculates the distances between the data points,
the maximum distance, the minimum distance, the dc value, the rho
value, the delta value, the number of neighbors, and the structure
of the data
:param low: lower bound of the data
:param u: upper bound of the data
:param y: the labels of the data
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
The function takes in a classifier, and then labels the next point,
and then labels the previous points, without filtering.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
                # .loc accepts the list of filtered indexes (.at is scalar-only)
                self.structure_stdnpf.loc[filtered_indexes, "label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
        Compare the noise-filter output against the labelled samples: any result
        sample that does not appear in ``complete`` has its label reset to -1
        (unlabelled) in ``structure_stdnpf``.
        :param complete: the samples that were labelled before filtering
        :param result: the samples returned by the noise filter
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
for r in results_to_unlabeled:
            # Find the stored sample that matches r and reset its label.
            for idx, sample in self.structure_stdnpf["sample"].items():
                if np.array_equal(sample, r):
                    self.structure_stdnpf.at[idx, "label"] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = | pd.DataFrame(self.y) | pandas.DataFrame |
# Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0
"""Library for modeling generative typed bayesian networks.
Each model consists of multiple networks, one for each type of input data.
For instance, a person model might have a different network for persons of
each household type. This allows the model to learn transition probabilities
for data of each type.
`SegmentedData` helps train this typed network structure by allowing users to
specify a segmentation function to segment the training data by type.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import range, str
from collections import defaultdict, Counter
import json
import itertools
import sys
import pandas
from pomegranate import BayesianNetwork
def default_segmenter(x):
return 'one_segment'
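# Illustrative alternative to default_segmenter (added for clarity, not part of the
# original module): a segmenter is any callable that maps one row of cleaned data to
# a hashable type key. The 'num_workers' column below is a hypothetical example field.
def example_workers_segmenter(row):
    """Bucket rows by how many workers the household reports."""
    workers = row.get('num_workers', 0)
    if workers == 0:
        return 'no_workers'
    if workers == 1:
        return 'one_worker'
    return 'multiple_workers'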
class SegmentedData(object):
"""Segmented data for use with the segemented BayesianNetworkModel.
Like the model itself, training data uses a mapping of type -> data.
"""
def __init__(self, type_to_data, segmenter=None):
self.type_to_data = type_to_data
self.segmenter = segmenter
@staticmethod
def from_data(cleaned_data, fields, weight_field=None, segmenter=None):
"""Input more data.
Args:
cleaned_data (CleanedData): data to train on
segmenter: function mapping a dict of data to a type for
segmentation
weight_field (unicode): Name of the int field that shows how much
this row of data should be weighted.
"""
segmenter = segmenter or default_segmenter
type_to_data = defaultdict(list)
for _, row in cleaned_data.data.iterrows():
type_ = segmenter(row)
weight = row[weight_field] if weight_field else 1
cleaned_row = tuple(row[fields])
for _ in range(weight):
type_to_data[type_].append(cleaned_row)
return SegmentedData(type_to_data, segmenter)
def num_rows_data(self):
return sum(len(data) for data in self.type_to_data.values())
def types(self):
return self.type_to_data.keys()
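# Minimal usage sketch (added for illustration): from_data expects an object exposing
# a `.data` DataFrame, as CleanedData does elsewhere in this codebase; a namedtuple
# stands in for it here and the column names are made up.
def _example_segmented_data():
    from collections import namedtuple
    CleanedStandIn = namedtuple('CleanedStandIn', ['data'])
    frame = pandas.DataFrame({
        'household_type': ['own', 'rent', 'rent'],
        'num_people': [2, 1, 3],
    })
    return SegmentedData.from_data(
        CleanedStandIn(frame),
        fields=['num_people'],
        segmenter=lambda row: row['household_type'],
    )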
class BayesianNetworkModel(object):
"""A typed Bayesian network model.
This bayesian network model has a fixed list of nodes passed in at creation.
It holds a separate network for each user-defined type.
"""
def __init__(self, type_to_network, fields, segmenter=None):
self.type_to_network = type_to_network
self.fields = fields
self.distribution_cache = {}
self.segmenter = segmenter or default_segmenter
@staticmethod
def from_file(filename, segmenter=None):
with open(filename) as infile:
json_string = infile.read()
return BayesianNetworkModel.from_json(json_string, segmenter)
def write(self, outfilename):
with open(outfilename, 'w') as outfile:
json_string = self.to_json()
outfile.write(json_string)
def to_json(self):
blob = {'fieldnames': self.fields}
blob['type_to_network'] = {
type_: json.loads(network.to_json()) for type_, network in self.type_to_network.items()
}
return json.dumps(blob, indent=4, sort_keys=True)
@staticmethod
def _df_from_conditional(probabilities):
"""
Helper method to extract a probability table from pomegranate's json
format for conditional probability distributions.
"""
state_map = defaultdict(dict)
for row in probabilities:
evidence = tuple(row[:-2])
value = row[-2]
probability = float(row[-1])
state_map[evidence][value] = probability
return | pandas.DataFrame.from_dict(state_map) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 16:30:16 2019
@author: <NAME>
"""
### Program for controlling BK8542B DC Electronic Load for IV curve measurement of solar panel ###
import serial, time, csv, os
import pandas as pd
import itertools as it
from time import strftime
from array import array
global ser, ser_relay, resp_status_dict, mode_cc, mode_cv, mode_cw, mode_cr
global scale_curr, scale_volt, scale_watt, scale_resi
global r1, r2, r3, r4, r5, r6, r7, r8
global sample_id
sample_id = 29381
# Initialize PC-load serial communication and global variables
def init_load():
"""Docstring"""
global ser, resp_status_dict, mode_cc, mode_cv, mode_cw, mode_cr
global scale_curr, scale_volt, scale_watt, scale_resi
baudrate = 9600
port = "COM4"
ser = serial.Serial(port,baudrate, timeout=1)
resp_status_dict = {
0x90: "ERROR: Invalid checksum",
0xA0: "ERROR: Invalid value",
0xB0: "ERROR: Unable to execute",
0xC0: "ERROR: invalid command",
0x80: True,
}
mode_cc = 0 # constant current mode
mode_cv = 1 # constant voltage mode
mode_cw = 2 # constant power mode
mode_cr = 3 # constant resistance mode
scale_volt = 1000
scale_curr = 10000
scale_watt = 1000
scale_resi = 1000
def close():
"""Docstring"""
ser.close()
def parse_data(resp):
"""Docstring"""
data = resp[4] | (resp[5] << 8) | (resp[6] << 16) | (resp[7] << 24)
print(data)
return data
def check_resp(resp):
"""Docstring"""
if len(resp) == 26:
# Confirm start byte
if resp[0] == 0xAA:
resp_type = resp[2]
if resp_type == 0x12: # Status type
return resp_status_dict[resp[3]]
else:
return True
else:
print('Start byte mismatch')
return None
else:
print('Packet length mismatch')
return None
def build_cmd(cmd, value=None):
"""Docstring"""
build_cmd = array('B', [0x00]*26)
build_cmd[0] = 0xAA # Packet start
build_cmd[1] = 0x00 # Unsupported address location
build_cmd[2] = cmd & 0xFF # Command value
if value is not None:
build_cmd[3] = value & 0xFF # value 1st byte little endian
build_cmd[4] = (value >> 8) & 0xFF # value 2nd byte little endian
build_cmd[5] = (value >> 16) & 0xFF # value 3rd byte little endian
build_cmd[6] = (value >> 24) & 0xFF # value 4th byte little endian
checksum = 0
for item in build_cmd:
checksum += item
checksum %= 256
build_cmd[25] = checksum & 0xFF
return build_cmd.tobytes()
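# Worked example (added for illustration): command 0x20 with value 1 -- the packet
# set_remote_control(True) sends below -- is 26 bytes long, starts with 0xAA and ends
# with a modulo-256 checksum of the preceding bytes (0xAA + 0x20 + 0x01 = 0xCB).
def _example_remote_on_packet():
    pkt = build_cmd(0x20, value=1)
    assert len(pkt) == 26
    assert pkt[0] == 0xAA and pkt[2] == 0x20 and pkt[3] == 0x01
    assert pkt[25] == (0xAA + 0x20 + 0x01) % 256
    return pkt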
def send_recv_cmd(cmd_packet):
"""Docstring"""
# House cleaning, flush serial input and output bufferss
ser.reset_output_buffer()
ser.reset_input_buffer()
# Send and receive
ser.write(cmd_packet)
time.sleep(0.250) # Provide time for response
resp_array = array('B', ser.read(26)) # get resp and put in array
check = check_resp(resp_array)
if check is True:
return resp_array
else:
print('Response check failed')
print(check)
return None
def get_device_info():
"""Docstring"""
built_packet = build_cmd(0x6A)
resp = send_recv_cmd(built_packet)
if resp is not None:
model = chr(resp[3]) + chr(resp[4]) + chr(resp[5]) + chr(resp[6])
version = str(resp[9]) + '.' + str(resp[8])
serial = chr(resp[10]) + chr(resp[11]) + chr(resp[12]) + chr(resp[13]) + chr(resp[14]) + chr(resp[16]) + chr(resp[17]) + chr(resp[18]) + chr(resp[19])
return (model, version, serial)
else:
return None
def get_input_values():
"""Docstring"""
built_packet = build_cmd(0x5F)
resp = send_recv_cmd(built_packet)
if resp is not None:
volts = (resp[3] | (resp[4] << 8) | (resp[5] << 16) | (resp[6] << 24)) / scale_volt
current = (resp[7] | (resp[8] << 8) | (resp[9] << 16) | (resp[10] << 24)) / scale_curr
power = (resp[11] | (resp[12] << 8) | (resp[13] << 16) | (resp[14] << 24)) / scale_watt
return (volts, current, power)
else:
return None
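# Example poll (added for illustration): read and print one voltage/current/power sample.
def _print_one_sample():
    vals = get_input_values()
    if vals is not None:
        volts, current, power = vals
        print('%.3f V, %.4f A, %.3f W' % (volts, current, power))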
def set_function(function):
"""Docstring"""
built_packet = build_cmd(0x5D, value=function)
resp = send_recv_cmd(built_packet)
return resp
def get_function():
built_packet = build_cmd(0x5E)
resp = send_recv_cmd(built_packet)
if resp is not None:
return resp[3]
else:
return None
def set_remote_sense(is_remote=False):
"""Docstring"""
built_packet = build_cmd(0x56, value=int(is_remote))
resp = send_recv_cmd(built_packet)
return resp
def get_remote_sense():
"""Docstring"""
built_packet = build_cmd(0x57)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp)
else:
return None
def set_remote_control(is_remote=False):
"""Docstring"""
built_packet = build_cmd(0x20, value=int(is_remote))
resp = send_recv_cmd(built_packet)
if is_remote == False:
return False
else:
return True
def set_local_control(is_local=True):
"""Docstring"""
built_packet = build_cmd(0x55, value=int(is_local))
resp = send_recv_cmd(built_packet)
return resp
def set_mode(mode):
"""Docstring"""
built_packet = build_cmd(0x28, value=mode)
resp = send_recv_cmd(built_packet)
return resp
def get_mode():
"""Docstring"""
built_packet = build_cmd(0x29)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp)
else:
return None
def set_enable_load(is_enabled=False):
"""Docstring"""
built_packet = build_cmd(0x21, value=int(is_enabled))
resp = send_recv_cmd(built_packet)
return resp
def set_max_volts(max_volts=0):
"""Docstring"""
built_packet = build_cmd(0x22, value=int(max_volts))
resp = send_recv_cmd(built_packet)
return resp
def get_max_volts():
"""Docstring"""
built_packet = build_cmd(0x23)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
def set_max_current( max_current=0):
"""Docstring"""
built_packet = build_cmd(0x24, value=int(max_current * scale_curr))
resp = send_recv_cmd(built_packet)
return resp
def get_max_current():
"""Docstring"""
built_packet = build_cmd(0x25)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_curr
else:
return None
def set_max_power(max_power=0):
    """Set the maximum power protection level in watts."""
    # 0x26 is assumed here: the set/get pairs in this protocol use consecutive codes,
    # get_max_power reads 0x27, and 0x24 is already the set-max-current command.
    built_packet = build_cmd(0x26, value=int(max_power * scale_watt))
    resp = send_recv_cmd(built_packet)
    return resp
def get_max_power():
    """Read back the maximum power protection level in watts."""
    built_packet = build_cmd(0x27)
    resp = send_recv_cmd(built_packet)
    if resp is not None:
        return parse_data(resp) / scale_watt
    else:
        return None
return None
def set_CV_volts(cv_volts=0):
"""Docstring"""
built_packet = build_cmd(0x2C, value=int(cv_volts * scale_volt))
resp = send_recv_cmd(built_packet)
return resp
def get_CV_volts():
"""Docstring"""
built_packet = build_cmd(0x2D)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
def set_CC_current(cc_current=0):
"""Docstring"""
built_packet = build_cmd(0x2A, value=int(cc_current * scale_curr))
resp = send_recv_cmd(built_packet)
return resp
def get_CC_current():
"""Docstring"""
built_packet = build_cmd(0x2B)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_curr
else:
return None
def set_CP_power(cp_power=0):
    """Set the constant-power (CP) level in watts."""
    built_packet = build_cmd(0x2E, value=int(cp_power * scale_watt))
    resp = send_recv_cmd(built_packet)
    return resp
def get_CP_power():
    """Read back the constant-power (CP) level in watts."""
    built_packet = build_cmd(0x2F)
    resp = send_recv_cmd(built_packet)
    if resp is not None:
        return parse_data(resp) / scale_watt
    else:
        return None
def set_CR_resistance(cr_resistance=0):
"""Docstring"""
built_packet = build_cmd(0x30, value=int(cr_resistance * scale_resi))
resp = send_recv_cmd(built_packet)
return resp
def get_CR_resistance():
"""Docstring"""
built_packet = build_cmd(0x31)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_resi
else:
return None
def set_bat_volts_min(min_volts=3):
"""Docstring"""
built_packet = build_cmd(0x4E, value=int(min_volts * scale_volt))
resp = send_recv_cmd(built_packet)
return resp
def get_bat_volts_min():
"""Docstring"""
built_packet = build_cmd(0x4F)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
#______________________________________________________________________________
### Vellemen VM8090 relay card communication and control
def init_relay_card():
global ser_relay
port = "COM5"
baudrate = 19200
ser_relay = serial.Serial(port,baudrate, timeout=1)
def close_relay():
"""Disconnect from relay card"""
ser_relay.close()
def build_cmd_relay(cmd_relay, which_relay):
"""Construct command for relay card"""
global r1, r2, r3, r4, r5, r6, r7, r8
r1 = 0x01
r2 = 0x02
r3 = 0x04
r4 = 0x08
r5 = 0x10
r6 = 0x20
r7 = 0x40
r8 = 0x80
    packet = array('B', [0x00]*7)
    packet[0] = 0x04                 # STX start byte
    packet[1] = cmd_relay & 0xFF     # command byte
    packet[2] = which_relay & 0xFF   # mask byte selecting the relay(s)
    packet[3] = 0x00                 # param1 (unused)
    packet[4] = 0x00                 # param2 (unused)
    # Checksum formula follows the original script, reduced to a single byte.
    packet[5] = (-(packet[0] + packet[1] + packet[2] + packet[3] + packet[4]) + 1) & 0xFF
    packet[6] = 0x0F                 # ETX end byte
    return packet.tobytes()
def send_cmd_relay(cmd_relay_packet):
"""Send or receive command packet from relay card"""
ser_relay.reset_output_buffer()
ser_relay.reset_input_buffer()
ser_relay.write(cmd_relay_packet)
def switch_relay_on(which_relay):
"""Switch on one or more relays"""
built_packet_relay = build_cmd_relay(0x11, which_relay)
resp = send_cmd_relay(built_packet_relay)
return resp
def switch_relay_off(which_relay):
built_packet_relay = build_cmd_relay(0x12, which_relay)
resp = send_cmd_relay(built_packet_relay)
return resp
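# Convenience sketch (added for illustration): the VM8090 mask byte carries one bit per
# relay, so relay n corresponds to bit n-1, matching the r1..r8 constants above.
def relay_mask(relay_number):
    """Return the mask byte for a single relay numbered 1-8."""
    if not 1 <= relay_number <= 8:
        raise ValueError('relay_number must be between 1 and 8')
    return 1 << (relay_number - 1)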
# Save data: current, voltage, and power
def data_file(log_file, log_file_header):
"""Docstring"""
if os.path.exists(log_file) is not True:
with open(log_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(log_file_header)
return log_file
def data_point(inputs: list):
"""Organizes data for export to excel"""
opv = '1'
timenow = strftime("%#m/%#d/%Y %#H:%M")
volts = inputs[0]
current = inputs[1]
power = inputs[2]
data_point = [opv, timenow, volts, current, power]
return data_point
def write_data_tofile(data_point):
global sample_id
if data_point is not None:
sample_id += 1
sample_id_lst = [sample_id]
        # The log file name and header below are assumptions chosen for illustration.
        log_file = data_file(
            'iv_curve_raw_log.csv',
            ['sample_id', 'opv', 'time', 'volts', 'current', 'power'],
        )
with open(log_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(sample_id_lst + data_point)
# IV curve measurement
def open_circ():
"""Open circuit voltage measurement"""
set_mode(mode_cc) # set operation mode to CC
time.sleep(.250)
set_CC_current(cc_current=0) # set CC mode current to 0 amps
time.sleep(.1)
oc_vals = get_input_values() # read open circuits levels
oc_data_point = data_point(oc_vals) # create data point for open circuit measurement
voc = oc_data_point[2] # open circuit voltage measurement
print('Open circuit voltage: ', voc)
write_data_tofile(oc_data_point) # write data to file
return voc
def iv_curve(voc):
"""Measure intermediate current voltage points"""
set_mode(mode_cv) # set operation mode to CC
time.sleep(.250)
volt_step = voc
while volt_step > 0.5:
set_CV_volts(volt_step)
time.sleep(.1)
curve_vals = get_input_values()
curve_data_point = data_point(curve_vals)
print('voltage, current, power: ', curve_data_point[2], curve_data_point[3], curve_data_point[4])
write_data_tofile(curve_data_point)
new_volt_step = curve_data_point[2] - 1.0
volt_step = new_volt_step
pass
def short_circ():
"""Measure short circuit current (nearest to 0 volts)"""
set_mode(mode_cv)
time.sleep(.250)
set_CV_volts(0.1)
time.sleep(.250)
sc_vals = get_input_values()
sc_data_point = data_point(sc_vals)
jsc = sc_data_point[3]
print('Short circuit current: ', jsc)
write_data_tofile(sc_data_point)
def sweep():
"""Measure entire IV curve"""
set_enable_load(True) # turn input ON
time.sleep(.250)
print('Begin IV curve measurement')
voc = open_circ() # measure open circuit voltage
iv_curve(voc) # measure iv curve
short_circ() # measure short circuit current
time.sleep(.250)
set_enable_load(False) # turn input OFF
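# Illustrative driver (not part of the original measurement flow): one way to wire the
# helpers above into a single run. The try/finally cleanup is an assumption.
def run_single_sweep():
    """Open the load connection, take one IV curve, then release the instrument."""
    init_load()                    # open the serial link on COM4
    set_remote_control(True)       # take remote control of the front panel
    try:
        sweep()                    # Voc, intermediate points, then Isc
    finally:
        set_remote_control(False)  # hand control back to the front panel
        close()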
#______________________________________________________________________________
def process_data(in_file=str, out_file=str, opv_num=str):
"""Process data for each IV curve measurement"""
out_file_header = ['opv', 'curve_id', 'time', 'hour', 'voc', 'jsc', 'mpp', 'ff']
data_file(out_file, out_file_header)
df = pd.read_csv(in_file)
    curve_id_count = 1
    curve = df.loc[df['curve_id'] == curve_id_count]
    # An empty selection means every curve in the file has been processed.
    while not curve.empty:
opv = opv_num
time = curve['time'].iloc[0] # start time of IV curve measurement
hour = float(time[-2] + time[-1])/60.0 + float(time[-5] + time[-4])
voc = curve['volts'].max()
jsc = curve['current'].max()
mpp = curve['power'].max()
ff = mpp / (voc * jsc)
data_point = [opv, curve_id_count, time, hour, voc, jsc, mpp, ff]
with open(out_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(data_point)
new_curve_id_count = curve_id_count + 1
curve_id_count = new_curve_id_count
curve = df.loc[df['curve_id'] == curve_id_count]
pass
return
def match_env(opv_in_file, env_in_file, out_file):
"""Match corresponding environmental measurement to IV curve measurement"""
df_opv = pd.read_csv(opv_in_file)
df_env = pd.read_csv(env_in_file)
# df_env['TIMESTAMP'] = pd.to_datetime(df_env['TIMESTAMP'],format='%m/%d/%y %H:%M').drop_duplicates() # 10-27-19 to 10-31-19
df_env['TIMESTAMP'] = pd.to_datetime(df_env['TIMESTAMP'],format='%Y-%m-%d %H:%M:%S') # 10-31-19 onwards
df_opv['time'] = | pd.to_datetime(df_opv['time'],format='%m/%d/%Y %H:%M') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 16:29:10 2019
@author: hduser
"""
import pandas as pd
survey_raw = pd.read_pickle("survey.pickle")
image_metrics_raw = pd.read_pickle("image_metrics.pickle")
image_data_raw = pd.read_pickle("image_data.pickle")
object_labels_raw = | pd.read_pickle("object_labels.pickle") | pandas.read_pickle |
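# Quick sanity check (added for illustration): the pickles' schemas are not documented
# here, so only the shape of each loaded object is printed.
for name, frame in [('survey', survey_raw), ('image_metrics', image_metrics_raw),
                    ('image_data', image_data_raw), ('object_labels', object_labels_raw)]:
    print(name, getattr(frame, 'shape', type(frame)))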
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
from matplotlib import pyplot as plt
import numpy as np
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load Yelp Data
businesses = pd.read_json(dir_path + '/yelp_data/yelp_business.json',lines=True)
reviews = pd.read_json(dir_path + '/yelp_data/yelp_review.json',lines=True)
users = | pd.read_json(dir_path + '/yelp_data/yelp_user.json',lines=True) | pandas.read_json |
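# The rest of the original script is not shown; the sketch below is one plausible
# continuation given the imports above (train_test_split, LinearRegression, joblib).
# The 'stars' and 'review_count' columns are assumptions taken from the public Yelp
# dataset schema. Usage would be: fit_review_count_model(businesses).
def fit_review_count_model(business_df):
    features = business_df[['stars']].values     # predictor: average star rating
    target = business_df['review_count'].values  # response: number of reviews
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=1)
    model = LinearRegression()
    model.fit(x_train, y_train)
    print('Held-out R^2:', model.score(x_test, y_test))
    joblib.dump(model, dir_path + '/review_count_model.joblib')
    return model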
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
from pytest import param
from ibis.pandas.aggcontext import Summarize, window_agg_udf
df = pd.DataFrame(
{
'id': [1, 2, 1, 2],
'v1': [1.0, 2.0, 3.0, 4.0],
'v2': [10.0, 20.0, 30.0, 40.0],
}
)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(lambda v1: v1.mean(), lambda df: df['v1'].mean(), id='udf',),
param('mean', lambda df: df['v1'].mean(), id='string',),
],
)
def test_summarize_single_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single Series."""
aggcontext = Summarize()
result = aggcontext.agg(df['v1'], agg_fn)
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(lambda v1: v1.mean(), lambda df: df['v1'].mean(), id='udf',),
param('mean', lambda df: df['v1'].mean(), id='string',),
],
)
def test_summarize_single_seriesgroupby(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single SeriesGroupBy."""
aggcontext = Summarize()
df_grouped = df.sort_values('id').groupby('id')
result = aggcontext.agg(df_grouped['v1'], agg_fn)
expected = expected_fn(df_grouped)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(
lambda v1, v2: v1.mean() - v2.mean(),
lambda df: df['v1'].mean() - df['v2'].mean(),
id='two-column',
),
# Two columns, but only the second one is actually used in UDF
param(
lambda v1, v2: v2.mean(),
lambda df: df['v2'].mean(),
id='redundant-column',
),
],
)
def test_summarize_multiple_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on many Series."""
aggcontext = Summarize()
args = [df['v1'], df['v2']]
result = aggcontext.agg(args[0], agg_fn, *args[1:])
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
'param',
[
(
pd.Series([True, True, True, True]),
pd.Series([1.0, 2.0, 2.0, 3.0]),
),
(
pd.Series([False, True, True, False]),
pd.Series([np.NaN, 2.0, 2.0, np.NaN]),
),
],
)
def test_window_agg_udf(param):
""" Test passing custom window indices for window aggregation."""
mask, expected = param
grouped_data = df.sort_values('id').groupby('id')['v1']
result_index = grouped_data.obj.index
window_lower_indices = pd.Series([0, 0, 2, 2])
window_upper_indices = pd.Series([1, 2, 3, 4])
result = window_agg_udf(
grouped_data,
lambda s: s.mean(),
window_lower_indices,
window_upper_indices,
mask,
result_index,
dtype='float',
max_lookback=None,
)
expected.index = grouped_data.obj.index
tm.assert_series_equal(result, expected)
def test_window_agg_udf_different_freq():
""" Test that window_agg_udf works when the window series and data series
have different frequencies.
"""
time = pd.Series([pd.Timestamp('20200101'), pd.Timestamp('20200201')])
data = | pd.Series([1, 2, 3, 4, 5, 6]) | pandas.Series |
from typing import List, NamedTuple, Tuple, Union
import geopandas as gpd
import gmsh
import numpy as np
import pandas as pd
import shapely.geometry as sg
from .common import FloatArray, IntArray, coord_dtype, flatten, separate
Z_DEFAULT = 0.0
POINT_DIM = 0
LINE_DIM = 1
PLANE_DIM = 2
class PolygonInfo(NamedTuple):
index: int
size: int
interior_indices: List[int]
interior_sizes: List[int]
polygon_id: int
class LineStringInfo(NamedTuple):
index: int
size: int
embedded_in: Union[int, None]
def polygon_info(
polygon: sg.Polygon, cellsize: float, index: int, polygon_id: int
) -> Tuple[PolygonInfo, FloatArray, FloatArray, int]:
exterior_coords = np.array(polygon.exterior.coords)[:-1]
size = len(exterior_coords)
vertices = [exterior_coords]
cellsizes = [np.full(size, cellsize)]
info = PolygonInfo(index, size, [], [], polygon_id)
index += size
for interior in polygon.interiors:
interior_coords = np.array(interior.coords)[:-1]
vertices.append(interior_coords)
size = len(interior_coords)
cellsizes.append(np.full(size, cellsize))
info.interior_indices.append(index)
info.interior_sizes.append(size)
index += size
return info, vertices, cellsizes, index
def linestring_info(
linestring: sg.LineString, cellsize: float, index: int, inside: Union[int, None]
) -> Tuple[LineStringInfo, FloatArray, FloatArray, int]:
vertices = np.array(linestring.coords)
size = len(vertices)
cellsizes = np.full(size, cellsize)
info = LineStringInfo(index, size, inside)
index += size
return info, vertices, cellsizes, index
def add_vertices(vertices, cellsizes, tags) -> None:
for (x, y), cellsize, tag in zip(vertices, cellsizes, tags):
gmsh.model.geo.addPoint(x, y, Z_DEFAULT, cellsize, tag)
def add_linestrings(
features: List[LineStringInfo], tags: IntArray
) -> Tuple[IntArray, IntArray]:
n_lines = sum(info.size - 1 for info in features)
line_indices = np.empty(n_lines, dtype=np.int64)
embedded_in = np.empty(n_lines, dtype=np.int64)
i = 0
for info in features:
point_tags = tags[info.index : info.index + info.size]
first = point_tags[0]
for second in point_tags[1:]:
line_index = gmsh.model.geo.addLine(first, second)
line_indices[i] = line_index
embedded_in[i] = info.embedded_in
first = second
i += 1
return line_indices, embedded_in
def add_curve_loop(point_tags: FloatArray) -> int:
tags = []
first = point_tags[-1]
for second in point_tags:
line_tag = gmsh.model.geo.addLine(first, second)
tags.append(line_tag)
first = second
curve_loop_tag = gmsh.model.geo.addCurveLoop(tags)
return curve_loop_tag
def add_polygons(
features: List[PolygonInfo], tags: IntArray
) -> Tuple[List[int], List[int]]:
plane_tags = []
for info in features:
# Add the exterior loop first
curve_loop_tags = [add_curve_loop(tags[info.index : info.index + info.size])]
# Now add holes
for start, size in zip(info.interior_indices, info.interior_sizes):
loop_tag = add_curve_loop(tags[start : start + size])
curve_loop_tags.append(loop_tag)
plane_tag = gmsh.model.geo.addPlaneSurface(curve_loop_tags, tag=info.polygon_id)
plane_tags.append(plane_tag)
return curve_loop_tags, plane_tags
def add_points(points: gpd.GeoDataFrame) -> Tuple[IntArray, IntArray]:
n_points = len(points)
indices = np.empty(n_points, dtype=np.int64)
embedded_in = points["__polygon_id"].values
# We have to add points one by one due to the Gmsh addPoint API
for i, row in enumerate(points.to_dict("records")):
point = row["geometry"]
# Rely on the automatic number of gmsh now to generate the indices
point_index = gmsh.model.geo.addPoint(
point.x, point.y, Z_DEFAULT, row["cellsize"]
)
indices[i] = point_index
return indices, embedded_in
def collect_polygons(
polygons: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[PolygonInfo]]:
vertices = []
cellsizes = []
features = []
for row in polygons.to_dict("records"):
info, coords, cells, index = polygon_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.extend(coords)
cellsizes.extend(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_linestrings(
linestrings: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[LineStringInfo]]:
vertices = []
cellsizes = []
features = []
for row in linestrings.to_dict("records"):
info, coords, cells, index = linestring_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.append(coords)
cellsizes.append(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_points(points: gpd.GeoDataFrame) -> FloatArray:
return np.stack((points["geometry"].x, points["geometry"].y), axis=1)
def embed_where(gdf: gpd.GeoDataFrame, polygons: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
tmp = gpd.sjoin(gdf, polygons, predicate="within", how="inner")
tmp["cellsize"] = tmp[["cellsize_left", "cellsize_right"]].min(axis=1)
return tmp[["cellsize", "__polygon_id", "geometry"]]
def add_geometry(
polygons: gpd.GeoDataFrame, linestrings: gpd.GeoDataFrame, points: gpd.GeoDataFrame
):
# Assign unique ids
polygons["__polygon_id"] = np.arange(1, len(polygons) + 1)
# Figure out in which polygon the points and linestrings will be embedded.
linestrings = embed_where(linestrings, polygons)
embedded_points = embed_where(points, polygons)
# Collect all coordinates, and store the length and type of every element
index, poly_vertices, poly_cellsizes, polygon_features = collect_polygons(
polygons, index=0
)
index, line_vertices, line_cellsizes, linestring_features = collect_linestrings(
linestrings, index
)
vertices = np.concatenate(poly_vertices + line_vertices)
cellsizes = np.concatenate(poly_cellsizes + line_cellsizes)
# Get the unique vertices, and generate the array of indices pointing to
# them for every feature
vertices, indices = np.unique(
vertices.reshape(-1).view(coord_dtype), return_inverse=True
)
vertex_tags = np.arange(1, len(vertices) + 1)
tags = vertex_tags[indices]
# Get the smallest cellsize per vertex
cellsizes = pd.Series(cellsizes).groupby(tags).min().values
# Add all unique vertices. This includes vertices for linestrings and polygons.
add_vertices(vertices, cellsizes, vertex_tags)
# Add all geometries to gmsh
add_polygons(polygon_features, tags)
linestring_indices, linestring_embedded = add_linestrings(linestring_features, tags)
gmsh.model.geo.synchronize()
# Now embed the points and linestrings in the polygons
for polygon_id, embed_indices in pd.Series(linestring_indices).groupby(
linestring_embedded
):
gmsh.model.mesh.embed(LINE_DIM, embed_indices, PLANE_DIM, polygon_id)
if len(embedded_points) > 0:
point_indices, point_embedded = add_points(embedded_points)
gmsh.model.geo.synchronize()
for polygon_id, embed_indices in | pd.Series(point_indices) | pandas.Series |
'''
/**
* real_time_insights.py
*
* Streams a real-time insights for the supplied pair
* An example of the real-time insights is available here:
* https://app.ae3platform.com/insights
*
* Disclaimer:
* APEX:E3 is a financial technology company based in the United Kingdom https://www.apexe3.com
*
* None of this code constitutes financial advice. APEX:E3 is not
* liable for any loss resulting from the use of this code or the API.
*
* This code is governed by The MIT License (MIT)
*
* Copyright (c) 2020 APEX:E3 Team
*
**/
'''
import sys
sys.path.append('..')
from apexe3.apexe3 import initialise
from apexe3.apexe3 import initialise_stream
from apexe3.apexe3 import initialise_insights_for_pair
import pandas as pd
#Change these values to a base or quote you are interested in
base = 'btc'
quote = 'usdt'
def process_spread(event):
print('Best spreads for ' + str(base) +' '+ str(quote))
table= | pd.DataFrame(event["values"]) | pandas.DataFrame |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
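# Small illustration (not part of the original pandas source): the i8 wrappers above
# work because a datetime64[ns] array and its int64 view share one buffer, so joins can
# run on plain integers and the result can be reinterpreted as timestamps afterwards.
def _i8_view_roundtrip_example():
    stamps = np.array(['2012-01-01', '2012-01-02'], dtype='M8[ns]')
    as_i8 = stamps.view('i8')      # nanoseconds since the UNIX epoch
    return as_i8.view('M8[ns]')    # the same datetimes again, no copy made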
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', copy=copy)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', copy=copy)
else:
subarr = tools.to_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = | tools._figure_out_timezone(start, end, tz) | pandas.tseries.tools._figure_out_timezone |
from os import getcwd
import sys
sys.path.append(getcwd() + '/..') # Add src/ dir to import path
import traceback
import logging
from os.path import join
from datetime import date, timedelta, datetime
from collections import Counter
import networkx as nx
import pandas as pd
from pymongo import MongoClient
from bson.objectid import ObjectId
import libs.networkAnalysis as na
import libs.pandasLib as pl
from libs.mongoLib import getContentDocsPerPlatform, getAllDocs, getMinMaxDay
import libs.visualization as vz
def getGraphRequirments(collectionEnt, collectionLoc, collectionCont, platforms, timeLimits=None):
# Convert to list due to multiple use
contentList = list(getContentDocsPerPlatform(collectionCont, platforms))
entitiesList = list(getAllDocs(collectionEnt))
locationsList = list(getAllDocs(collectionLoc))
minDay, maxDay = getMinMaxDay(collectionCont)
temporalPeriod = [minDay + timedelta(days=x) for x in range((maxDay - minDay).days + 1)]
if timeLimits is not None:
logging.info(f'Limiting time from {timeLimits[0]} to {timeLimits[1]}')
temporalPeriod = [d for d in temporalPeriod if d.year >= timeLimits[0] and d.year <= timeLimits[1]]
droppedTimestamps, contentToDrop = 0, []
for it, c in enumerate(contentList):
timestampsToKeep = [t for t in c['timestamp'] if t.year >= timeLimits[0] and t.year <= timeLimits[1]]
            droppedTimestamps += len(c['timestamp']) - len(timestampsToKeep)
if len(timestampsToKeep) == 0:
contentToDrop.append(it)
else:
c['timestamp'] = timestampsToKeep
contentList = [c for it, c in enumerate(contentList) if it not in contentToDrop]
logging.info(f'Dropped {droppedTimestamps} content timestamps (outside of temporal range) - '
f'resulting in deleting {len(contentToDrop)} pieces of content entirely.')
return {
'contentList': contentList,
'entitiesList': entitiesList,
'locationsList': locationsList,
'temporalPeriod': temporalPeriod,
}
def createGraphNodes(G, nodesPerClass):
for clss, listOfNodes in nodesPerClass.items():
G.add_nodes_from(listOfNodes, nodeClass=clss)
return G
def createGraphEdges(G, temporalPeriod, contentDf, nodesPerClass):
'''
Adds the following types of connections to the graph:
(Day)-[temporal]-(Day)
(Day)-[action]-(Content)
(Content)-[$Variable]-(Location); $Variable={authorship, placed, impliedMention, inherentMention}
(Content)-[$Variable]-(Tags); $Mention={impliedMention, inherentMention}
:param G:
:param temporalPeriod:
:param contentDf:
:param nodesPerClass:
:return:
'''
try:
# (Day)-(Day)
timeEdges = [(temporalPeriod[i], temporalPeriod[i+1]) for i in range(len(temporalPeriod)-1)]
logging.info(f'> Time Edges {len(timeEdges)}')
# (Day)-(Content) edges
actionDf = pl.unrollListAttr(contentDf.reset_index(), 'timestamp', ['_id'])
actionDf = actionDf.set_index('_id')['value']
actionEdges = list(actionDf.items())
# Make sure every tail is already a node in the graph
assert(len([e[1] for e in actionEdges if e[1] not in nodesPerClass['time']]) == 0)
logging.info(f'> Action Edges {len(actionEdges)}')
# (Content)-(Location) edges
locationsDf = pl.unrollListOfDictsAttr(contentDf.reset_index(), 'locations', ['_id'])
locationsDf.set_index('_id', inplace=True)
sourceEdgeTypes = locationsDf['relationshipType'].tolist()
locationEdges = list(locationsDf['label'].items())
assert(len([e[1] for e in locationEdges if e[1] not in nodesPerClass['spatial']]) == 0)
logging.info(f'> Source Edges {len(locationEdges)}')
# (Content)-(Tags) edges
tagDf = pl.unrollListOfDictsAttr(contentDf.reset_index(), 'tags', ['_id'])
tagDf.set_index('_id', inplace=True)
tagEdgeTypes = tagDf['relationshipType'].tolist()
tagEdges = list(tagDf['label'].items())
assert(len([e[1] for e in tagEdges if e[1] not in nodesPerClass['tag']]) == 0)
logging.info(f'> Tag Edges {len(tagEdges)}')
# Add them all to the graph
G.add_edges_from(timeEdges, edgeClass='temporal')
G.add_edges_from(actionEdges, edgeClass='action')
        # Attach each edge's own relationship type (a list passed as a keyword
        # argument would be copied onto every edge instead).
        G.add_edges_from(
            (u, v, {'edgeClass': t}) for (u, v), t in zip(locationEdges, sourceEdgeTypes)
        )
        G.add_edges_from(
            (u, v, {'edgeClass': t}) for (u, v), t in zip(tagEdges, tagEdgeTypes)
        )
except Exception as ex:
print(traceback.format_exc())
breakpoint()
return G
def addNodeAttributes(G, data, platform=False, contentType=False, locationType=False):
if platform is True:
# Regarding content nodes
assert('contentDf' in data.keys())
platformPerIdCont = data['contentDf']['platform'].to_dict()
# Regarding location nodes
assert('locationDf' in data.keys())
platformPerIdLoc = data['locationDf']['platform'].to_dict()
# Make sure keys don't collide - this shouldn't be possible (IDs from content nodes != IDS from location nodes)
assert(len([x for x in platformPerIdCont.keys() if x in platformPerIdLoc.keys()]) == 0)
platformPerIdCont.update(platformPerIdLoc)
nx.set_node_attributes(G, platformPerIdCont, 'platform')
if contentType is True:
assert('contentDf' in data.keys())
contentType = data['contentDf']['type'].to_dict()
nx.set_node_attributes(G, contentType, 'contentType')
if locationType is True:
assert('locationDf' in data.keys())
locationType = data['locationDf']['type'].to_dict()
nx.set_node_attributes(G, locationType, 'locationType')
return G
if __name__ == '__main__':
root = logging.getLogger()
root.setLevel(logging.DEBUG)
baseDir = '../../data/'
platforms = ['Facebook', 'YouTube', 'Google Search', 'Reddit', 'Twitter'] # ,
minYear, maxYear = 2019, 2019
try:
# Set up DB
client = MongoClient()
db = client['digitalMe']
collectionCont = db['content']
collectionEnt = db['entities']
collectionLoc = db['locations']
logging.info(f'Querying Data')
data = getGraphRequirments(collectionEnt, collectionLoc, collectionCont, platforms, timeLimits=(minYear, maxYear))
nodesPerClass = {
'time': data['temporalPeriod'],
'content': [x['_id'] for x in data['contentList']],
'tag': [x['_id'] for x in data['entitiesList']],
'spatial': [x['_id'] for x in data['locationsList']],
}
logging.info(f'Data acquired, creating graph (temporal period: {data["temporalPeriod"][0]} -> {data["temporalPeriod"][-1]})')
# Transform lists to dataframe for faster operations
data['contentDf'] = pd.DataFrame(data['contentList']).set_index('_id')
data['contentDf'].timestamp = data['contentDf'].timestamp.apply(lambda x: [d.date() for d in x])
data['locationDf'] = pd.DataFrame(data['locationsList']).set_index('_id')
data['tagDf'] = | pd.DataFrame(data['entitiesList']) | pandas.DataFrame |
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._colums == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
        - instance._drop == 'high'
        - instance._high_is_scalar == True
        - instance._low_is_scalar == False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_column`` is ``'a#'`` (the column name plus the separator token)
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
        - ``_diff_column`` is ``'a#b'`` (the two column names joined by the separator token)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
        which is np.log(3 + 1) = np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
        pd.testing.assert_frame_equal(out, expected_out)
# -*- coding: utf-8 -*-
"""Recommender_System.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_yvJ9w2fZE6sxmTSjig7LhQhWl0HOG8p
# Importing data and lemmatizing the data
"""
import numpy as np
import pandas as pd
import re
import scipy
import math
URL = 'https://drive.google.com/file/d/137eW4F35OctoRuq5DasVUXw6GpmfXdBS/view?usp=sharing'
path = 'https://drive.google.com/uc?export=download&id='+URL.split('/')[-2]
#df = pd.read_pickle(path)
data = pd.read_csv(path, skip_blank_lines=True)
pd.set_option('display.max_colwidth', None)
print(data.shape)
data.drop_duplicates(subset='content', inplace=True, ignore_index=True)
data.shape
data.head(1)
data[data['_id'] == '6076fadb0b3e8bc9b779293e']['_id'].to_string()
def make_lower_case(text):
return text.lower()
import re
from pprint import pprint
import nltk, spacy, gensim
from sklearn.feature_extraction.text import CountVectorizer
def get_lemmatized_clean_data(df):
# Convert to list
data = df.content.tolist()
# Remove Emails
    data = [re.sub(r'\S*@\S*\s?', '', sent) for sent in data]
    # Remove new line characters
    data = [re.sub(r'\s+', ' ', sent) for sent in data]
    # Remove distracting single quotes
    data = [re.sub("'", "", sent) for sent in data]
# pprint(data[:1])
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(data))
# print(data_words[:1])
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
return texts_out
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# Run in terminal: python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
return data_lemmatized
X = get_lemmatized_clean_data(data)
max_time = []
for i in X:
max_time.append(len(i.split(' '))/2.5)
data['Max_Time'] = max_time
data.head()
"""# SKlearn NewsData Import"""
from sklearn.datasets import fetch_20newsgroups
newsgroups_train = fetch_20newsgroups(subset='train')
def get_data(mydata):
mydata.keys()
df = pd.DataFrame([mydata['data'],[mydata['target_names'][idx] for idx in mydata['target']],mydata['target']])
df = df.transpose()
df.columns = ['content', 'target_names', 'target']
return df
df = get_data(newsgroups_train)
df.head()
news = data.drop(axis = 1, columns=['_id', 'y',]).to_numpy()
data_lemmatized = get_lemmatized_clean_data(df)
df.head()
"""# Converting the data to bag_of_word representation"""
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
my_stopwords = stopwords.words('english')
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
vectorizor = TfidfVectorizer(stop_words=my_stopwords, lowercase= True)
bag_of_words = vectorizor.fit_transform(X)
"""# Content Based Similarity Using TFIDF Vector"""
from numpy import dot
from numpy.linalg import norm
def similarity(a,b):
cos_sim = dot(a, b)/(norm(a)*norm(b))
return cos_sim
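# Quick sanity check of the similarity helper on toy vectors (illustrative only,
# not part of the original notebook): identical vectors score 1.0, orthogonal
# vectors score 0.0.
similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 1.0
similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 0.0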
def ContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(bag_of_words[news_index].todense())[0]
b = np.array(bag_of_words[an_index].todense())[0]
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
ContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in ContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# Content Based Similarity Using SVD
"""
# Performing SVD
svd = TruncatedSVD(n_components=50)
lsa = svd.fit_transform(bag_of_words)
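# Optional check (a sketch, not in the original notebook): how much of the TF-IDF
# variance the 50 LSA components retain, via TruncatedSVD's explained_variance_ratio_.
print('LSA explained variance retained:', svd.explained_variance_ratio_.sum())
print('LSA matrix shape:', lsa.shape)  # (n_documents, 50)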
def SVDContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(lsa[news_index])
b = np.array(lsa[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
SVDContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in SVDContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# LDA Implementation
"""
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
lda = LatentDirichletAllocation(learning_method='batch', n_jobs=-1)
bag_of_words.T
# LDA Cross-Validation
n_components = [20, 50, 70]
learning_decay = [0.5, 0.7, 0.8]
params = {'n_components': n_components, 'learning_decay': learning_decay}
model = GridSearchCV(lda, param_grid=params)
model.fit(bag_of_words.T)
best_params = model.best_estimator_
best_params
lda_res = best_params.components_.T
lda_res.shape
import pickle
pickle_file = 'lda_cross_validation_rev.pkl'
with open(pickle_file, 'wb') as file:
pickle.dump(model, file)
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# get the folder id where you want to save your file
file = drive.CreateFile({'parents':[{u'id': '19AI35wfuabh1JQ6b1Z3YH5uJ6uL3N6BD'}]})
file.SetContentFile(pickle_file)
file.Upload()
with open(pickle_file, 'rb') as file:
lda_pkl_model = pickle.load(file)
def LDAContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(lda_res[news_index])
b = np.array(lda_res[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
LDAContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in LDAContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# Word embedding using Glove Vectorizor"""
from google.colab import drive
drive.mount('/content/drive')
from numpy import array
from numpy import asarray
from numpy import zeros
embeddings_dictionary = dict()
glove_file = open('/content/drive/MyDrive/NLP and Text Analysis/glove.6B/glove.6B.100d.txt', encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
i = 0
glov_stop = []
news_embedding_dict = dict()
for word in vectorizor.vocabulary_.keys():
if word in embeddings_dictionary:
news_embedding_dict[word] = embeddings_dictionary[word]
else:
glov_stop.append(word)
stopset = set(nltk.corpus.stopwords.words('english'))
new_stopwords_list = stopset.union(glov_stop)
vectorizor_glov = TfidfVectorizer(stop_words=new_stopwords_list)
glov_bag_of_words = vectorizor_glov.fit_transform(X)
y = np.array([val for (key, val) in news_embedding_dict.items()])
y.shape
glov_bag_of_words.shape
document_embedding = glov_bag_of_words*y
document_embedding.shape
document_embedding
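# The product above is intended as a TF-IDF-weighted combination of GloVe vectors:
# (n_articles x vocab) @ (vocab x 100) -> (n_articles x 100), i.e. one dense
# 100-dimensional embedding per article. Note this assumes the column order of
# glov_bag_of_words matches the row order of y; if the vectorizer orders its
# vocabulary differently, the two should be aligned on the shared vocabulary first.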
def ContentBasedFilteringWordEmbedding(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(document_embedding[news_index])
b = np.array(document_embedding[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
ContentBasedFilteringWordEmbedding('6076fadb0b3e8bc9b779293e')
"""# Collaborative Filtering"""
topics = ['cricket', 'football', 'golf', 'asia', 'africa', 'europe', 'americas', 'style', 'tech', 'science', 'hollywood', 'us politics', 'stock market', 'travel', 'coronavirus', 'black lives matter']
from random import sample
class User:
def __init__(self, id):
self.id = id
self.prefered_categories = sample(topics, np.random.randint(low=3, high= 5))
self.no_of_articles_served = np.random.randint(10, 50)*10
self.no_of_sessions = math.ceil((self.no_of_articles_served)/10)
self.ids = [self.id for _ in range(self.no_of_articles_served)]
self.sessions = []
self.articles_served = []
self.ratings = []
self.click = []
self.ranks = []
j = math.ceil(self.no_of_articles_served*0.7)
for m in range(j):
id_temp = np.random.choice(data[data['topics'].isin(self.prefered_categories)]['_id'])
self.articles_served.append(id_temp)
click = np.random.binomial(1, 0.7,1)[0]
self.click.append(click)
self.ratings.append('-' if click == 0 else np.random.randint((data[data['_id'] == id_temp]['Max_Time'])/4, data[data['_id'] == self.articles_served[m]]['Max_Time'])[0])
j = self.no_of_articles_served-j
for m in range(j):
id_temp = np.random.choice(data[~data['topics'].isin(self.prefered_categories)]['_id'])
self.articles_served.append(id_temp)
click = np.random.binomial(1, 0.1,1)[0]
self.click.append(click)
self.ratings.append('-' if click == 0 else np.random.randint(0, data[data['_id'] == id_temp]['Max_Time'])[0])
for i in range(self.no_of_sessions):
for k in range(10):
self.sessions.append(i)
self.ranks.append(k)
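# The User class simulates implicit feedback: each user is served articles, clicks
# with probability 0.7 on preferred topics and 0.1 otherwise, and the 'Time Spent'
# values are drawn relative to each article's Max_Time. Later, time spent divided
# by Max_Time is used as a pseudo-rating for collaborative filtering.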
new_user = User(1)
data[data['_id'].isin(new_user.articles_served)].tail(10)
def CreateRandomUserProfiler(max_no_user = 40):
Users = []
for i in range(max_no_user):
Users.append(User(i))
        print(Users[i].prefered_categories)
UserProfiler = pd.DataFrame(columns=['UserId', 'SessionID', 'ArticleID Served', 'Article Rank', 'Click', 'Time Spent'])
for user in Users:
df = pd.DataFrame()
df['UserId'] = user.ids
df['SessionID'] = user.sessions
df['ArticleID Served'] = user.articles_served
df['Article Rank'] = user.ranks
df['Click'] = user.click
df['Time Spent'] = user.ratings
UserProfiler = pd.concat([UserProfiler,df], ignore_index=True)
return UserProfiler
UserProfiler = CreateRandomUserProfiler(40)
UserProfiler.head()
UserProfiler.shape
"""# Matrix Factorization"""
def getNewsInfo(id):
return data[data['_id']==id]
import numpy as np
from scipy.sparse import csr_matrix
# Creating a user * news sparse matrix
sparseMatrix = csr_matrix((UserProfiler.UserId.unique().shape[0], data.shape[0])).toarray()
k = 0
user = UserProfiler.iloc[k]
for i in UserProfiler.UserId.unique():
while user.UserId == i and k < UserProfiler.shape[0]:
user = UserProfiler.iloc[k]
if user.Click:
newsInfo = getNewsInfo(user['ArticleID Served'])
rating = user['Time Spent']/newsInfo['Max_Time']
sparseMatrix[i][newsInfo.index] = rating
k+=1
userItem = csr_matrix(sparseMatrix)
from numpy import count_nonzero
sparsity = 1.0 - count_nonzero(sparseMatrix) / sparseMatrix.size
print(sparsity)
pd.DataFrame(sparseMatrix)
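# The notebook stops after building the user-item matrix, so the cell below is only
# a minimal sketch of one way to factorize it and produce recommendations. The
# number of factors (20) and the helper name are illustrative choices, not part of
# the source.
user_item_svd = TruncatedSVD(n_components=20)
user_factors = user_item_svd.fit_transform(userItem)   # (n_users, 20)
item_factors = user_item_svd.components_               # (20, n_articles)
predicted_ratings = user_factors @ item_factors        # reconstructed pseudo-ratings
def recommend_for_user(user_id, first_n=10):
    """Return the top unseen articles for a user (illustrative sketch)."""
    scores = predicted_ratings[user_id].copy()
    scores[sparseMatrix[user_id] > 0] = -np.inf  # hide articles already rated
    top_idx = np.argsort(scores)[::-1][:first_n]
    return data.iloc[top_idx][['_id', 'title']]
recommend_for_user(1)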
#!/usr/bin/python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a PDF report using seaborn."""
from absl import app
from absl import flags
from matplotlib.backends.backend_pdf import PdfPages
import gps_pb2
import common_lib
import timescale
import tracks
import attr
import matplotlib
import pandas
import psycopg2
import seaborn
FLAGS = flags.FLAGS
def LabelLaps(row, suffix=''):
minutes = row.lap_duration_ms // 60000
seconds = (row.lap_duration_ms - minutes * 60000) / 1000
return '%d:%0.02f Lap %d %s' % (minutes, seconds, row.lap_number, suffix)
def PointsNearTurn(row, turn):
point = gps_pb2.Point()
point.lat = row.lat
point.lon = row.lon
turn_point = gps_pb2.Point()
turn_point.lat = turn.lat
turn_point.lon = turn.lon
return common_lib.PointDelta(point, turn_point)
def ApplyTurnDistance(data, turn, report_range):
key = 't%s_distance' % turn.number
data[key] = data.apply(
PointsNearTurn, axis=1, turn=turn)
return data[data[key] < report_range]
def FindClosestTrack(data):
point = gps_pb2.Point()
point.lat = data['lat'].iloc[0]
point.lon = data['lon'].iloc[0]
_, track, _ = tracks.FindClosestTrack(point)
return track
@attr.s
class Report(object):
"""Generates a post session report vs personal best."""
conn = attr.ib(type=psycopg2.extensions.connection)
def GetSingleLapData(self, session_id, lap_id):
select_statement = """
SELECT
laps.number AS lap_number,
laps.duration_ms AS lap_duration_ms,
elapsed_duration_ms,
lat,
lon,
tps_voltage,
rpm,
oil_pressure_voltage,
speed
FROM POINTS
JOIN laps ON points.lap_id = laps.id
JOIN sessions ON laps.session_id = sessions.id
WHERE sessions.id = %s AND
lap_id = %s
ORDER BY points.time
"""
return pandas.io.sql.read_sql(
select_statement,
self.conn,
params=(session_id,lap_id))
def FindLastThreeBestLaps(self):
select_last_session = """
SELECT sessions.id FROM sessions
JOIN laps ON sessions.id = laps.session_id
WHERE laps.duration_ms IS NOT NULL
      ORDER BY time DESC LIMIT 1;
"""
data_frames = []
with self.conn.cursor() as cursor:
cursor.execute(select_last_session)
self.last_session_id = cursor.fetchone()[0]
select_best_three = """
SELECT laps.id FROM sessions
JOIN laps ON sessions.id = laps.session_id
WHERE session_id = %s AND
laps.duration_ms IS NOT NULL
ORDER BY laps.duration_ms LIMIT 3;
"""
with self.conn.cursor() as cursor:
cursor.execute(select_best_three, (self.last_session_id,))
for lap_id in cursor.fetchall():
data_frames.append(self.GetSingleLapData(self.last_session_id, lap_id))
return pandas.concat(data_frames)
def FindPersonalBest(self):
select_last_session_best_time = """
SELECT duration_ms, track FROM laps
JOIN sessions ON laps.session_id=sessions.id
WHERE session_id = %s
ORDER BY duration_ms
LIMIT 1;
"""
with self.conn.cursor() as cursor:
cursor.execute(select_last_session_best_time, (self.last_session_id,))
best_time, track = cursor.fetchone()
min_time = best_time - 1000
max_time = best_time + 1000
select_personal_best = """
SELECT session_id, laps.id FROM laps
JOIN sessions ON laps.session_id=sessions.id
WHERE track = %s AND
duration_ms IS NOT NULL AND
duration_ms > %s AND
duration_ms < %s
ORDER BY duration_ms
LIMIT 1;
"""
with self.conn.cursor() as cursor:
cursor.execute(select_personal_best, (track, min_time, max_time))
session_id, lap_id = cursor.fetchone()
return self.GetSingleLapData(session_id, lap_id)
def PlotOilPressure(self, data):
fig, ax = matplotlib.pyplot.subplots()
ax.set_title('Oil Pressure Scatter (whole lap)')
seaborn.scatterplot(
x='rpm',
y='oil_pressure_voltage',
hue='label',
data=data)
self.pdf.savefig(fig) # type: ignore
def PlotElapsedTimePerTurn(self, turn_datas):
fig, ax = matplotlib.pyplot.subplots()
turn_elapsed_time = []
for turn_number in sorted(turn_datas):
turn_data = turn_datas[turn_number]
grouped = turn_data.groupby('label')
# Delta per turn per label.
agg = grouped.agg(lambda x: x.max() - x.min()).reset_index()
agg['turn_number'] = turn_number
# Delta - best time in turn per label.
agg.update(agg['elapsed_duration_ms'] - agg['elapsed_duration_ms'].min())
turn_elapsed_time.append(agg)
data = pandas.concat(turn_elapsed_time).reset_index()
ax.set_title('Time Delta Per Turn')
seaborn.lineplot(
x='turn_number',
y='elapsed_duration_ms',
hue='label',
linewidth=0.5,
data=data)
self.pdf.savefig(fig) # type: ignore
def Plot(self, data, turn, y,
x='elapsed_duration_ms', hue='label', sort=False):
fig, ax = matplotlib.pyplot.subplots()
ax.set_title('Turn %s %s' % (turn.number, y))
seaborn.lineplot(
y=y,
x=x,
hue=hue,
sort=sort,
linewidth=0.5,
data=data)
self.pdf.savefig(fig) # type: ignore
def Run(self):
data = self.FindLastThreeBestLaps()
data['label'] = data.apply(LabelLaps, axis=1)
pob_data = self.FindPersonalBest()
pob_data['label'] = pob_data.apply(LabelLaps, axis=1, suffix=' *PoB')
    data = pandas.concat((pob_data, data))
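# The remainder of Run() (per-turn slicing and plotting) is missing from this copy,
# so the block below is only a sketch of how the report might be wired together.
# The database name, the output filename and attaching the PdfPages instance as
# ``self.pdf`` are assumptions, not part of the original module.
def main(unused_argv):
  conn = psycopg2.connect(dbname='exit_speed')  # hypothetical database name
  report = Report(conn)
  with PdfPages('report.pdf') as pdf:  # hypothetical output path
    report.pdf = pdf
    report.Run()
if __name__ == '__main__':
  app.run(main)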
import numpy as np
from numpy.random import randn
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_frame(frame, name):
frame_result = getattr(frame.ewm(com=10), name)()
assert isinstance(frame_result, DataFrame)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
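    # Worked example for the expected values above (com=2 -> alpha = 1/(1+com) = 1/3):
    # adjust=True:  y_1 = (x_1 + (1-alpha)*x_0) / (1 + (1-alpha))
    #                   = (2 + (2/3)*1) / (1 + 2/3) = 1.6
    # adjust=False: y_1 = (1-alpha)*y_0 + alpha*x_1 = (2/3)*1 + (1/3)*2 = 1.333...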
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
True,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0))
* ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0)),
],
),
],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=2.0, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewma_span_com_args(series):
A = series.ewm(com=9.5).mean()
B = series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20)
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
series.ewm().mean()
def test_ewma_halflife_arg(series):
A = series.ewm(com=13.932726172912965).mean()
B = series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm()
def test_ewm_alpha(arr):
# GH 10789
s = Series(arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(series):
# GH 10789
s = series
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
s.ewm()
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(arr):
# GH 12492
s = Series(arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize("method", ["mean", "vol", "var"])
def test_ew_empty_series(method):
vals = Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
@pytest.mark.parametrize("min_periods", [0, 1])
@pytest.mark.parametrize("name", ["mean", "var", "vol"])
def test_ew_min_periods(min_periods, name):
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == "mean":
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)()
tm.assert_series_equal(result, Series(dtype="float64"))
# check series of length 1
result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()
if name == "mean":
tm.assert_series_equal(result, Series([1.0]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
        tm.assert_series_equal(result, Series([np.NaN]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from io import BytesIO
from typing import Dict, List
from urllib.request import urlopen
from zipfile import ZipFile
import geopandas as gpd
import pandas as pd
import pandera as pa
import requests
ch_essentials_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int),
"NAME": pa.Column(pd.StringDtype()),
"POI_LATITUDE": pa.Column(
pa.Float64,
nullable=True,
),
"POI_LONGITUDE": pa.Column(
pa.Float64,
nullable=True,
),
},
index=pa.Index(pa.Int),
)
poi_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int, unique=True),
"ADDRESS_INFO": pa.Column(pd.StringDtype()),
"NAME": pa.Column(pd.StringDtype(), unique=True),
"CATEGORY": pa.Column(pd.StringDtype()),
"PHONE": pa.Column(pd.StringDtype()),
"EMAIL": pa.Column(pd.StringDtype()),
"WEBSITE": pa.Column(pd.StringDtype()),
"GEOID": pa.Column(pa.Float, nullable=True),
"RECEIVED_DATE": pa.Column(pd.StringDtype()),
"ADDRESS_POINT_ID": pa.Column(pa.Float, nullable=True),
"LINEAR_NAME_FULL": pa.Column(pd.StringDtype()),
"ADDRESS_FULL": pa.Column(pd.StringDtype()),
"POSTAL_CODE": pa.Column(pd.StringDtype()),
"MUNICIPALITY": pa.Column(pd.StringDtype()),
"CITY": pa.Column(pd.StringDtype()),
"PLACE_NAME": pa.Column(pd.StringDtype()),
"GENERAL_USE_CODE": pa.Column(pa.Float, nullable=True),
"CENTRELINE": pa.Column(pa.Float, nullable=True),
"LO_NUM": pa.Column(pa.Float, nullable=True),
"LO_NUM_SUF": pa.Column(pd.StringDtype()),
"HI_NUM": pa.Column(pd.StringDtype()),
"HI_NUM_SUF": pa.Column(pd.StringDtype()),
"LINEAR_NAME_ID": pa.Column(pa.Float, nullable=True),
"WARD": pa.Column(pd.StringDtype()),
"WARD_2003": pa.Column(pa.Float, nullable=True),
"WARD_2018": pa.Column(pa.Float, nullable=True),
"MI_PRINX": pa.Column(pa.Float, nullable=True),
"ATTRACTION": pa.Column(pd.StringDtype(), unique=True),
"MAP_ACCESS": pa.Column(pd.StringDtype()),
"POI_LONGITUDE": pa.Column(pa.Float, unique=False),
"POI_LATITUDE": pa.Column(pa.Float, unique=False),
},
index=pa.Index(pa.Int),
)
gdf_schema = pa.DataFrameSchema(
columns={
"AREA_ID": pa.Column(pa.Int),
"AREA_SHORT_CODE": pa.Column(pd.StringDtype()),
"AREA_LONG_CODE": pa.Column(pd.StringDtype()),
"AREA_NAME": pa.Column(pd.StringDtype()),
"Shape__Area": pa.Column(pa.Float64),
# "Shape__Length": pa.Column(pa.Float64),
# "LATITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LATITUDE": pa.Column(pa.Float64),
# "LONGITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LONGITUDE": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
pub_trans_locations_schema = pa.DataFrameSchema(
columns={
"stop_id": pa.Column(pa.Int),
"stop_code": pa.Column(pa.Int),
"stop_name": pa.Column(pd.StringDtype()),
"stop_desc": pa.Column(pd.StringDtype(), nullable=True),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
"zone_id": pa.Column(pa.Float64, nullable=True),
"stop_url": pa.Column(pd.StringDtype(), nullable=True),
"location_type": pa.Column(pa.Float64, nullable=True),
"parent_station": pa.Column(pa.Float64, nullable=True),
"stop_timezone": pa.Column(pa.Float64, nullable=True),
"wheelchair_boarding": pa.Column(pa.Int),
},
index=pa.Index(pa.Int),
)
coll_univ_schema = pa.DataFrameSchema(
columns={
"institution_id": pa.Column(pa.Int),
"institution_name": pa.Column(pd.StringDtype()),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
def get_lat_long(row):
"""Get latitude and longitude."""
return row["coordinates"]
@pa.check_output(poi_schema)
def get_poi_data(url: str, poi_params: Dict) -> pd.DataFrame:
"""Get points of interest within city boundaries."""
poi_dtypes_dict = dict(
ADDRESS_INFO=pd.StringDtype(),
NAME=pd.StringDtype(),
CATEGORY=pd.StringDtype(),
PHONE=pd.StringDtype(),
EMAIL=pd.StringDtype(),
WEBSITE=pd.StringDtype(),
RECEIVED_DATE=pd.StringDtype(),
LINEAR_NAME_FULL=pd.StringDtype(),
ADDRESS_FULL=pd.StringDtype(),
POSTAL_CODE=pd.StringDtype(),
MUNICIPALITY=pd.StringDtype(),
CITY=pd.StringDtype(),
PLACE_NAME=pd.StringDtype(),
LO_NUM_SUF=pd.StringDtype(),
HI_NUM=pd.StringDtype(),
HI_NUM_SUF=pd.StringDtype(),
WARD=pd.StringDtype(),
ATTRACTION=pd.StringDtype(),
MAP_ACCESS=pd.StringDtype(),
)
package = requests.get(url, params=poi_params).json()
poi_url = package["result"]["resources"][0]["url"]
df = pd.read_csv(poi_url)
df = df.rename(columns={list(df)[0]: "ID"})
df[["POI_LONGITUDE", "POI_LATITUDE"]] = pd.DataFrame(
df["geometry"].apply(eval).apply(get_lat_long).tolist()
)
# Verify no duplicates (by name) are in the data
assert df[df.duplicated(subset=["NAME"], keep=False)].empty
df = df.astype(poi_dtypes_dict)
return df
@pa.check_output(ch_essentials_schema)
def get_cultural_hotspots(url: str, params: Dict) -> pd.DataFrame:
"""Get cultural hotspots within city boundaries."""
package = requests.get(url, params=params).json()
ch_locations = package["result"]["resources"][0]["url"]
ch_locs_dir_path = "data/raw/cultural-hotspot-points-of-interest-wgs84"
with urlopen(ch_locations) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall(ch_locs_dir_path)
df = gpd.read_file(f"{ch_locs_dir_path}/CULTURAL_HOTSPOT_WGS84.shp")
df = (
df.drop_duplicates(
subset=["PNT_OF_INT", "LATITUDE", "LONGITUDE"],
keep="first",
)
.reset_index(drop=True)
.copy()
)
df = (
df.drop_duplicates(
subset=["PNT_OF_INT"],
keep="first",
)
.reset_index(drop=True)
.copy()
)
assert df[df.duplicated(subset=["PNT_OF_INT"], keep=False)].empty
df_essentials = (
df[["RID", "PNT_OF_INT", "LATITUDE", "LONGITUDE"]]
.rename(
columns={
"RID": "ID",
"PNT_OF_INT": "NAME",
"LATITUDE": "POI_LATITUDE",
"LONGITUDE": "POI_LONGITUDE",
}
)
.astype({"NAME": pd.StringDtype()})
)
# print(df_essentials.dtypes)
return df_essentials
@pa.check_output(gdf_schema)
def get_neighbourhood_boundary_land_area_data(
url: str, params: Dict, cols_to_keep: List[str]
) -> pd.DataFrame:
"""Get citywide neighbourhood boundaries."""
package = requests.get(url, params=params).json()
files = package["result"]["resources"]
n_url = [f["url"] for f in files if f["url"].endswith("4326.geojson")][0]
gdf = gpd.read_file(n_url)
# print(gdf.head(2))
gdf["centroid"] = (
gdf["geometry"].to_crs(epsg=3395).centroid.to_crs(epsg=4326)
)
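    # Centroids are computed in a projected CRS (EPSG:3395) and converted back to
    # WGS84; Shape__Area below is measured in EPSG:3857 (Web Mercator) square metres,
    # which are inflated at Toronto's latitude, so treat them as relative sizes
    # rather than exact land areas.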
gdf["AREA_LATITUDE"] = gdf["centroid"].y
gdf["AREA_LONGITUDE"] = gdf["centroid"].x
gdf["Shape__Area"] = gdf["geometry"].to_crs(epsg=3857).area
gdf = gdf.astype(
{
"AREA_SHORT_CODE": pd.StringDtype(),
"AREA_LONG_CODE": pd.StringDtype(),
"AREA_NAME": pd.StringDtype(),
"AREA_LATITUDE": float,
"AREA_LONGITUDE": float,
}
)[cols_to_keep]
# print(len(gdf))
# assert len(gdf) == 140
return gdf
@pa.check_output(pub_trans_locations_schema)
def get_public_transit_locations(url: str, params: Dict) -> pd.DataFrame:
"""Get public transit locations within city boundaries."""
package = requests.get(url, params=params).json()
pt_locations = package["result"]["resources"][0]["url"]
pt_locs_dir_path = "data/raw/opendata_ttc_schedules"
with urlopen(pt_locations) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall(pt_locs_dir_path)
df_pt = pd.read_csv(f"{pt_locs_dir_path}/stops.txt").astype(
{
"stop_name": pd.StringDtype(),
"stop_desc": pd.StringDtype(),
"stop_url": pd.StringDtype(),
}
)
df_pt = df_pt.rename(columns={"stop_lat": "lat", "stop_lon": "lon"})
return df_pt
@pa.check_output(coll_univ_schema)
def get_coll_univ_locations() -> pd.DataFrame:
"""Get college and university locations within city boundaries."""
coll_univ_locations = {
"centennial": {"lat": 43.7854, "lon": -79.22664},
"george-brown": {"lat": 43.6761, "lon": -79.4111},
"humber": {"lat": 43.7290, "lon": -79.6074},
"ocad": {"lat": 43.6530, "lon": -79.3912},
"ryerson": {"lat": 43.6577, "lon": -79.3788},
"seneca": {"lat": 43.7955, "lon": -79.3496},
"tynedale": {"lat": 43.7970, "lon": -79.3945},
"uoft-scarborough": {"lat": 43.7844, "lon": -79.1851},
"uoft": {"lat": 43.6629, "lon": -79.5019},
"yorku": {"lat": 43.7735, "lon": -79.5019},
"yorku-glendon": {"lat": 43.7279, "lon": -79.3780},
}
    df_coll_univ = pd.DataFrame.from_dict(coll_univ_locations, orient="index")
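    # The original file is cut off here; the remainder below is a reconstruction
    # guided by coll_univ_schema (institution_id, institution_name, lat, lon), so
    # the exact column construction is an assumption.
    df_coll_univ = (
        df_coll_univ.rename_axis("institution_name")
        .reset_index()
        .astype({"institution_name": pd.StringDtype()})
    )
    df_coll_univ["institution_id"] = range(len(df_coll_univ))
    return df_coll_univ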
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
| tm.assert_series_equal(result, exp) | pandas._testing.assert_series_equal |
from copy import (
copy,
deepcopy,
)
import numpy as np
import pytest
from pandas.core.dtypes.common import is_scalar
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
def construct(box, shape, value=None, dtype=None, **kwargs):
"""
construct an object for the given shape
    if value is specified use that if it's a scalar
if value is an array, repeat it as needed
"""
if isinstance(shape, int):
shape = tuple([shape] * box._AXIS_LEN)
if value is not None:
if is_scalar(value):
if value == "empty":
arr = None
dtype = np.float64
# remove the info axis
kwargs.pop(box._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return box(arr, dtype=dtype, **kwargs)
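# --- Illustrative usage sketch (added; not part of the original test module) ---
# construct() broadcasts an int shape across all axes of the requested box and
# fills with `value` when it is a scalar; value="empty" returns an empty object.
def _construct_sketch():
    ones = construct(Series, 4, value=1)            # length-4 Series of 1.0
    nans = construct(DataFrame, 3, value=np.nan)    # 3x3 DataFrame of NaN
    empty = construct(DataFrame, 3, value="empty")  # empty DataFrame
    return ones, nans, empty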
class Generic:
@pytest.mark.parametrize(
"func",
[
str.lower,
{x: x.lower() for x in list("ABCD")},
Series({x: x.lower() for x in list("ABCD")}),
],
)
def test_rename(self, frame_or_series, func):
# single axis
idx = list("ABCD")
for axis in frame_or_series._AXIS_ORDERS:
kwargs = {axis: idx}
            obj = construct(frame_or_series, 4, **kwargs)
# rename a single axis
result = obj.rename(**{axis: func})
expected = obj.copy()
setattr(expected, axis, list("abcd"))
tm.assert_equal(result, expected)
def test_get_numeric_data(self, frame_or_series):
n = 4
kwargs = {
frame_or_series._get_axis_name(i): list(range(n))
for i in range(frame_or_series._AXIS_LEN)
}
# get the numeric data
o = construct(n, **kwargs)
result = o._get_numeric_data()
tm.assert_equal(result, o)
# non-inclusion
result = o._get_bool_data()
expected = construct(n, value="empty", **kwargs)
if isinstance(o, DataFrame):
# preserve columns dtype
expected.columns = o.columns[:0]
tm.assert_equal(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
tm.assert_equal(result, o)
def test_nonzero(self, frame_or_series):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = construct(frame_or_series, shape=4)
msg = f"The truth value of a {frame_or_series.__name__} is ambiguous"
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = construct(frame_or_series, shape=4, value=1)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = construct(frame_or_series, shape=4, value=np.nan)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
# empty
obj = construct(frame_or_series, shape=0)
with pytest.raises(ValueError, match=msg):
bool(obj)
# invalid behaviors
obj1 = construct(frame_or_series, shape=4, value=1)
obj2 = construct(frame_or_series, shape=4, value=1)
with pytest.raises(ValueError, match=msg):
if obj1:
pass
with pytest.raises(ValueError, match=msg):
obj1 and obj2
with pytest.raises(ValueError, match=msg):
obj1 or obj2
with pytest.raises(ValueError, match=msg):
not obj1
def test_frame_or_series_compound_dtypes(self, frame_or_series):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return construct(frame_or_series, shape=3, value=1, dtype=dtype)
msg = (
"compound dtypes are not implemented "
f"in the {frame_or_series.__name__} frame_or_series"
)
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f("int64")
f("float64")
f("M8[ns]")
def test_metadata_propagation(self, frame_or_series):
# check that the metadata matches up on the resulting ops
o = construct(frame_or_series, shape=3)
o.name = "foo"
o2 = construct(frame_or_series, shape=3)
o2.name = "bar"
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(1)
tm.assert_metadata_equivalent(o, result)
# ops with like
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(o)
tm.assert_metadata_equivalent(o, result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
v1 = getattr(o, op)(o)
tm.assert_metadata_equivalent(o, v1)
tm.assert_metadata_equivalent(o, v1 & v1)
tm.assert_metadata_equivalent(o, v1 | v1)
# combine_first
result = o.combine_first(o2)
tm.assert_metadata_equivalent(o, result)
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
result = o + o2
tm.assert_metadata_equivalent(result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
tm.assert_metadata_equivalent(v2)
tm.assert_metadata_equivalent(v1 & v2)
tm.assert_metadata_equivalent(v1 | v2)
def test_size_compat(self, frame_or_series):
# GH8846
# size property should be defined
o = construct(frame_or_series, shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self, frame_or_series):
# xref GH8846
o = construct(frame_or_series, shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
# See gh-12301
def test_stat_unexpected_keyword(self, frame_or_series):
obj = construct(frame_or_series, 5)
starwars = "Star Wars"
errmsg = "unexpected keyword"
with pytest.raises(TypeError, match=errmsg):
obj.max(epic=starwars) # stat_function
with pytest.raises(TypeError, match=errmsg):
obj.var(epic=starwars) # stat_function_ddof
with pytest.raises(TypeError, match=errmsg):
obj.sum(epic=starwars) # cum_function
with pytest.raises(TypeError, match=errmsg):
obj.any(epic=starwars) # logical_function
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
def test_api_compat(self, func, frame_or_series):
# GH 12021
# compat for __name__, __qualname__
        obj = construct(frame_or_series, 5)
f = getattr(obj, func)
assert f.__name__ == func
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self, frame_or_series):
obj = construct(frame_or_series, 5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
obj.max(out=out) # stat_function
with pytest.raises(ValueError, match=errmsg):
obj.var(out=out) # stat_function_ddof
with pytest.raises(ValueError, match=errmsg):
obj.sum(out=out) # cum_function
with pytest.raises(ValueError, match=errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self, frame_or_series):
# GH11382
# small
shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1))
small = construct(frame_or_series, shape, dtype="int8", value=1)
tm.assert_equal(small.truncate(), small)
tm.assert_equal(small.truncate(before=0, after=3e3), small)
tm.assert_equal(small.truncate(before=-1, after=2e3), small)
# big
shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1))
big = construct(frame_or_series, shape, dtype="int8", value=1)
tm.assert_equal(big.truncate(), big)
tm.assert_equal(big.truncate(before=0, after=3e6), big)
tm.assert_equal(big.truncate(before=-1, after=2e6), big)
@pytest.mark.parametrize(
"func",
[copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("shape", [0, 1, 2])
def test_copy_and_deepcopy(self, frame_or_series, shape, func):
# GH 15444
obj = construct(frame_or_series, shape)
obj_copy = func(obj)
assert obj_copy is not obj
tm.assert_equal(obj_copy, obj)
class TestNDFrame:
# tests that don't fit elsewhere
@pytest.mark.parametrize(
"ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]
)
def test_squeeze_series_noop(self, ser):
# noop
tm.assert_series_equal(ser.squeeze(), ser)
def test_squeeze_frame_noop(self):
# noop
df = | tm.makeTimeDataFrame() | pandas._testing.makeTimeDataFrame |
#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from abc import abstractmethod
import math
import numpy as np
from pandas import isnull
from six import with_metaclass
from toolz import merge
from zipline.assets import Equity, Future
from zipline.errors import HistoryWindowStartsBeforeData
from zipline.finance.constants import ROOT_SYMBOL_TO_ETA
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.finance.transaction import create_transaction
from zipline.utils.cache import ExpiringCache
from zipline.utils.dummy import DummyMapping
from zipline.utils.input_validation import (expect_bounded,
expect_strictly_bounded)
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
SQRT_252 = math.sqrt(252)
DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05
class LiquidityExceeded(Exception):
pass
def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or \
(order.direction < 0 and fill_price < order.limit):
return True
return False
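# --- Illustrative sketch (added; not part of zipline) ---
# fill_price_worse_than_limit_price() only reads the order's `limit` and
# `direction` attributes, so a SimpleNamespace stands in here for a real
# zipline.finance.order.Order.
def _limit_price_sketch():
    from types import SimpleNamespace

    buy_with_10_limit = SimpleNamespace(limit=10.0, direction=1)
    worse = fill_price_worse_than_limit_price(10.05, buy_with_10_limit)  # True
    ok = fill_price_worse_than_limit_price(9.95, buy_with_10_limit)      # False
    return worse, ok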
class SlippageModel(with_metaclass(FinancialModelMeta)):
"""
Abstract base class for slippage models.
Slippage models are responsible for the rates and prices at which orders
fill during a simulation.
To implement a new slippage model, create a subclass of
:class:`~zipline.finance.slippage.SlippageModel` and implement
:meth:`process_order`.
The :meth:`process_order` method must return a tuple of `(execution_price,
execution_volume)`, which signifies the price and volume for the
transaction that your model wants to generate.
Your model gets passed the same data object that is passed to handle_data
and other functions, letting you do any price or history lookup for any
security in your model.
The order object has the following properties: `amount` (float), `asset`
(Asset), `stop` and `limit` (float), and `stop_reached` and `limit_reached`
(boolean).
The `create_transaction` method takes the given order, the data
object, and the price and amount calculated by your slippage model, and
returns the newly constructed transaction.
Many slippage models' behavior depends on how much of the total volume
traded is being captured by the algorithm. You can use
:meth:`volume_for_bar` to see how many shares of the current security
have been traded so far during this bar. If your algorithm has many
different orders for the same stock in the same bar, this is useful for
making sure you don't take an unrealistically large fraction of the traded
volume.
If your slippage model doesn't place a transaction for the full amount of
the order, the order stays open with an updated amount value, and will be
passed to :meth:`process_order` on the next bar. Orders that have limits
that have not been reached will not be passed to :meth:`process_order`.
If your transaction has 0 shares or more shares than the original order
amount, an exception will be thrown.
Attributes
----------
volume_for_bar : int
Number of shares that have already been filled for the
currently-filling asset in the current minute. This attribute is
maintained automatically by the base class. It can be used by
subclasses to keep track of the total amount filled if there are
multiple open orders for a single asset.
Notes
-----
Subclasses that define their own constructors should call
``super(<subclass name>, self).__init__()`` before performing other
initialization.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
def __init__(self):
self._volume_for_bar = 0
@property
def volume_for_bar(self):
return self._volume_for_bar
@abstractmethod
def process_order(self, data, order):
"""
Compute the number of shares and price to fill for ``order`` in the
current minute.
Parameters
----------
data : zipline.protocol.BarData
The data for the given bar.
order : zipline.finance.order.Order
The order to simulate.
Returns
-------
execution_price : float
The price of the fill.
execution_volume : int
The number of shares that should be filled. Must be between ``0``
and ``order.amount - order.filled``. If the amount filled is less
than the amount remaining, ``order`` will remain open and will be
passed again to this method in the next minute.
Raises
------
zipline.finance.slippage.LiquidityExceeded
May be raised if no more orders should be processed for the current
asset during the current bar.
Notes
-----
Before this method is called, :attr:`volume_for_bar` will be set to the
number of shares that have already been filled for ``order.asset`` in
the current minute.
:meth:`process_order` is not called by the base class on bars for which
there was no historical volume.
"""
raise NotImplementedError('process_order')
def simulate(self, data, asset, orders_for_asset):
self._volume_for_bar = 0
volume = data.current(asset, "volume")
if volume == 0:
return
# can use the close price, since we verified there's volume in this
# bar.
price = data.current(asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
return
# END
dt = data.current_dt
for order in orders_for_asset:
if order.open_amount == 0:
continue
order.check_triggers(price, dt)
if not order.triggered:
continue
txn = None
try:
execution_price, execution_volume = \
self.process_order(data, order)
if execution_price is not None:
txn = create_transaction(
order,
data.current_dt,
execution_price,
execution_volume
)
except LiquidityExceeded:
break
if txn:
self._volume_for_bar += abs(txn.amount)
yield order, txn
def asdict(self):
return self.__dict__
class NoSlippage(SlippageModel):
"""A slippage model where all orders fill immediately and completely at the
current close price.
Notes
-----
This is primarily used for testing.
"""
@staticmethod
def process_order(data, order):
return (
data.current(order.asset, 'close'),
order.amount,
)
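# --- Illustrative sketch (added; not part of zipline) ---
# A minimal custom model following the contract described in the SlippageModel
# docstring: process_order() returns an (execution_price, execution_volume)
# pair. Here the full remaining amount fills at the close price shifted by a
# fixed number of basis points in the direction of the trade.
class FixedBasisPointsSlippageSketch(SlippageModel):
    def __init__(self, basis_points=5):
        super(FixedBasisPointsSlippageSketch, self).__init__()
        self.basis_points = basis_points

    def process_order(self, data, order):
        price = data.current(order.asset, "close")
        direction = 1 if order.amount > 0 else -1
        fill_price = price * (1 + direction * self.basis_points / 10000.0)
        return fill_price, order.open_amount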
class EquitySlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
"""
Base class for slippage models which only support equities.
"""
allowed_asset_types = (Equity,)
class FutureSlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
"""
Base class for slippage models which only support futures.
"""
allowed_asset_types = (Future,)
class VolumeShareSlippage(SlippageModel):
"""
Model slippage as a quadratic function of percentage of historical volume.
Orders to buy will be filled at::
price * (1 + price_impact * (volume_share ** 2))
Orders to sell will be filled at::
price * (1 - price_impact * (volume_share ** 2))
where ``price`` is the close price for the bar, and ``volume_share`` is the
percentage of minutely volume filled, up to a max of ``volume_limit``.
In the VolumeShareSlippage model, the price you get is a function of your order
size relative to the security's actual traded volume. You provide a volume_limit
cap (default 0.025), which limits the proportion of volume that your order can
    take up per bar. For example, if the backtest is running on one-minute bars, you
    place an order for 60 shares, 1000 shares trade in each of the next several
    minutes, and the volume_limit is 0.025, then your order will be split into
    three orders (25 shares, 25 shares, and 10 shares). Setting the volume_limit to
1.00 will permit the backtester to use up to 100% of the bar towards filling your
order. Using the same example, this will fill 60 shares in the next minute bar.
The price impact constant (default 0.1) defines how large of an impact your order
will have on the backtester's price calculation. The slippage is calculated by
multiplying the price impact constant by the square of the ratio of the order to
the total volume. In our previous example, for the 25-share orders, the price impact
is .1 * (25/1000) * (25/1000), or 0.00625%. For the 10-share order, the price impact
is .1 * (10/1000) * (10/1000), or .001%.
Parameters
----------
volume_limit : float, optional
Maximum percent of historical volume that can fill in each bar. 0.5
means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e.,
2.5%).
price_impact : float, optional
Scaling coefficient for price impact. Larger values will result in more
simulated price impact. Smaller values will result in less simulated
price impact. Default is 0.1.
"""
def __init__(self,
volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,
price_impact=0.1):
super(VolumeShareSlippage, self).__init__()
self.volume_limit = volume_limit
self.price_impact = price_impact
def __repr__(self):
return """
{class_name}(
volume_limit={volume_limit},
price_impact={price_impact})
""".strip().format(class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
price_impact=self.price_impact)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = self.volume_limit * volume
# price impact accounts for the total volume of transactions
# created against the current minute bar
remaining_volume = max_volume - self.volume_for_bar
if remaining_volume < 1:
# we can't fill any more transactions
raise LiquidityExceeded()
# the current order amount will be the min of the
# volume available in the bar or the open amount.
cur_volume = int(min(remaining_volume, abs(order.open_amount)))
if cur_volume < 1:
return None, None
# tally the current amount into our total amount ordered.
# total amount will be used to calculate price impact
total_volume = self.volume_for_bar + cur_volume
volume_share = min(total_volume / volume,
self.volume_limit)
price = data.current(order.asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if | isnull(price) | pandas.isnull |
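# --- Worked numeric sketch (added; mirrors the VolumeShareSlippage docstring above) ---
# For a 25-share buy against 1000 shares of bar volume with the default
# price_impact of 0.1: volume_share = 25 / 1000 = 0.025, so the fill price is
# close * (1 + 0.1 * 0.025 ** 2), a 0.00625% impact (and -0.00625% for a sell).
def _volume_share_impact_sketch(close_price=100.0, order_shares=25, bar_volume=1000):
    price_impact = 0.1
    volume_share = order_shares / float(bar_volume)
    buy_fill = close_price * (1 + price_impact * volume_share ** 2)
    sell_fill = close_price * (1 - price_impact * volume_share ** 2)
    return buy_fill, sell_fill  # (100.00625, 99.99375) with the defaults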
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
def get_dtype(dtype, coerce_int=None):
if coerce_int is False and "int" in dtype:
return None
if dtype != "category":
return np.dtype(dtype)
return dtype
@pytest.mark.parametrize(
"method, data, expected_data, coerce_int, min_periods",
[
("count", np.arange(5), [1, 2, 2, 2, 2], True, 0),
("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True, 0),
("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False, 0),
("max", np.arange(5), [np.nan, 1, 2, 3, 4], True, None),
("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True, None),
("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False, None),
("min", np.arange(5), [np.nan, 0, 1, 2, 3], True, None),
("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True, None),
("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False, None),
("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True, None),
("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True, None),
("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False, None),
("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False, None),
("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True, None),
("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True, None),
(
"std",
[0, 1, 2, np.nan, 4],
[np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,
False,
None,
),
("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True, None),
("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True, None),
("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False, None),
("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
(
"median",
[0, 1, 2, np.nan, 4],
[np.nan, 0.5, 1.5, np.nan, np.nan],
False,
None,
),
],
)
def test_series_dtypes(method, data, expected_data, coerce_int, dtypes, min_periods):
s = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))
if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
getattr(s.rolling(2, min_periods=min_periods), method)()
else:
result = getattr(s.rolling(2, min_periods=min_periods), method)()
expected = Series(expected_data, dtype="float64")
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"method, expected_data, min_periods",
[
("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, 0),
(
"max",
{0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
None,
),
(
"min",
{0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
None,
),
(
"sum",
{0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},
None,
),
(
"mean",
{0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
None,
),
(
"std",
{
0: Series([np.nan] + [np.sqrt(2)] * 4),
1: Series([np.nan] + [np.sqrt(2)] * 4),
},
None,
),
(
"var",
{0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},
None,
),
(
"median",
{0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
None,
),
],
)
def test_dataframe_dtypes(method, expected_data, dtypes, min_periods):
if dtypes == "category":
pytest.skip("Category dataframe testing not implemented.")
df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))
if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
getattr(df.rolling(2, min_periods=min_periods), method)()
else:
result = getattr(df.rolling(2, min_periods=min_periods), method)()
expected = DataFrame(expected_data, dtype="float64")
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, List, Optional
import pandas as pd
try:
from fbprophet import Prophet
_no_prophet = False
except ImportError:
_no_prophet = True
Prophet = Dict[str, Any] # for Pyre
from kats.consts import Params, TimeSeriesData
from kats.models.model import Model
from kats.utils.parameter_tuning_utils import (
get_default_prophet_parameter_search_space,
)
class ProphetParams(Params):
"""Parameter class for Prophet model
This is the parameter class for prophet model, it contains all necessary
parameters as definied in Prophet implementation:
https://github.com/facebook/prophet/blob/master/python/prophet/forecaster.py
Attributes:
growth: String 'linear' or 'logistic' to specify a linear or logistic
trend.
changepoints: List of dates at which to include potential changepoints. If
not specified, potential changepoints are selected automatically.
n_changepoints: Number of potential changepoints to include. Not used
if input `changepoints` is supplied. If `changepoints` is not supplied,
then n_changepoints potential changepoints are selected uniformly from
the first `changepoint_range` proportion of the history.
changepoint_range: Proportion of history in which trend changepoints will
be estimated. Defaults to 0.8 for the first 80%. Not used if
`changepoints` is specified.
yearly_seasonality: Fit yearly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
weekly_seasonality: Fit weekly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
daily_seasonality: Fit daily seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
holidays: pd.DataFrame with columns holiday (string) and ds (date type)
and optionally columns lower_window and upper_window which specify a
range of days around the date to be included as holidays.
lower_window=-2 will include 2 days prior to the date as holidays. Also
optionally can have a column prior_scale specifying the prior scale for
that holiday.
seasonality_mode: 'additive' (default) or 'multiplicative'.
seasonality_prior_scale: Parameter modulating the strength of the
seasonality model. Larger values allow the model to fit larger seasonal
fluctuations, smaller values dampen the seasonality. Can be specified
for individual seasonalities using add_seasonality.
holidays_prior_scale: Parameter modulating the strength of the holiday
components model, unless overridden in the holidays input.
changepoint_prior_scale: Parameter modulating the flexibility of the
automatic changepoint selection. Large values will allow many
changepoints, small values will allow few changepoints.
mcmc_samples: Integer, if greater than 0, will do full Bayesian inference
with the specified number of MCMC samples. If 0, will do MAP
estimation.
interval_width: Float, width of the uncertainty intervals provided
for the forecast. If mcmc_samples=0, this will be only the uncertainty
in the trend using the MAP estimate of the extrapolated generative
model. If mcmc.samples>0, this will be integrated over all model
parameters, which will include uncertainty in seasonality.
uncertainty_samples: Number of simulated draws used to estimate
            uncertainty intervals. Setting this value to 0 or False will disable
uncertainty estimation and speed up the calculation.
cap: capacity, provided for logistic growth
floor: floor, the fcst value must be greater than the specified floor
        custom_seasonalities: customized seasonalities, each a dict with keys
"name", "period", "fourier_order"
extra_regressors: additional regressors used for fitting, each regressor
is a dict with keys "name" and "value"
"""
growth: str
changepoints: Optional[List[float]]
n_changepoints: int
changepoint_range: float
yearly_seasonality: str
weekly_seasonality: str
daily_seasonality: str
holidays: Optional[pd.DataFrame]
seasonality_mode: str
seasonality_prior_scale: float
holidays_prior_scale: float
changepoint_prior_scale: float
mcmc_samples: int
interval_width: float
uncertainty_samples: int
cap: Optional[float]
floor: Optional[float]
custom_seasonalities: List[Dict[str, Any]]
extra_regressors: List[Dict[str, Any]]
def __init__(
self,
growth: str = "linear",
changepoints: Optional[List[float]] = None,
n_changepoints: int = 25,
changepoint_range: float = 0.8,
yearly_seasonality: str = "auto",
weekly_seasonality: str = "auto",
daily_seasonality: str = "auto",
holidays: Optional[pd.DataFrame] = None,
seasonality_mode: str = "additive",
seasonality_prior_scale: float = 10.0,
holidays_prior_scale: float = 10.0,
changepoint_prior_scale: float = 0.05,
mcmc_samples: int = 0,
interval_width: float = 0.80,
uncertainty_samples: int = 1000,
cap: Optional[float] = None,
floor: Optional[float] = None,
custom_seasonalities: Optional[List[Dict[str, Any]]] = None,
extra_regressors: Optional[List[Dict[str, Any]]] = None,
) -> None:
if _no_prophet:
raise RuntimeError("requires fbprophet to be installed")
super().__init__()
self.growth = growth
self.changepoints = changepoints
self.n_changepoints = n_changepoints
self.changepoint_range = changepoint_range
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
self.daily_seasonality = daily_seasonality
self.holidays = holidays
self.seasonality_mode = seasonality_mode
self.seasonality_prior_scale = seasonality_prior_scale
self.holidays_prior_scale = holidays_prior_scale
self.changepoint_prior_scale = changepoint_prior_scale
self.mcmc_samples = mcmc_samples
self.interval_width = interval_width
self.uncertainty_samples = uncertainty_samples
self.cap = cap
self.floor = floor
self.custom_seasonalities = (
[] if custom_seasonalities is None else custom_seasonalities
)
self.extra_regressors = [] if extra_regressors is None else extra_regressors
logging.debug(
"Initialized Prophet with parameters. "
"growth:{growth},"
"changepoints:{changepoints},"
"n_changepoints:{n_changepoints},"
"changepoint_range:{changepoint_range},"
"yearly_seasonality:{yearly_seasonality},"
"weekly_seasonality:{weekly_seasonality},"
"daily_seasonality:{daily_seasonality},"
"holidays:{holidays},"
"seasonality_mode:{seasonality_mode},"
"seasonality_prior_scale:{seasonality_prior_scale},"
"holidays_prior_scale:{holidays_prior_scale},"
"changepoint_prior_scale:{changepoint_prior_scale},"
"mcmc_samples:{mcmc_samples},"
"interval_width:{interval_width},"
"uncertainty_samples:{uncertainty_samples},"
"cap:{cap},"
"floor:{floor},"
"custom_seasonalities:{custom_seasonalities},"
"extra_regressors:{extra_regressors}".format(
growth=growth,
changepoints=changepoints,
n_changepoints=n_changepoints,
changepoint_range=changepoint_range,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_mode=seasonality_mode,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples,
cap=cap,
floor=floor,
custom_seasonalities=custom_seasonalities,
extra_regressors=None
if extra_regressors is None
else [x["name"] for x in extra_regressors],
)
)
def validate_params(self) -> None:
"""validate Prophet parameters
This method validates some key parameters including growth rate
and custom_seasonalities.
"""
# cap must be given when using logistic growth
if (self.growth == "logistic") and (self.cap is None):
msg = "Capacity must be provided for logistic growth"
logging.error(msg)
raise ValueError(msg)
# If custom_seasonalities passed, ensure they contain the required keys.
reqd_seasonality_keys = ["name", "period", "fourier_order"]
if not all(
req_key in seasonality
for req_key in reqd_seasonality_keys
for seasonality in self.custom_seasonalities
):
msg = f"Custom seasonality dicts must contain the following keys:\n{reqd_seasonality_keys}"
logging.error(msg)
raise ValueError(msg)
# If extra_regressors passed, ensure they contain the required keys.
reqd_regressor_keys = ["name", "value"]
if not all(
req_key in regressor
for req_key in reqd_regressor_keys
for regressor in self.extra_regressors
):
msg = f"Extra regressor dicts must contain the following keys:\n{reqd_regressor_keys}"
logging.error(msg)
raise ValueError(msg)
logging.info("Method validate_params() is not fully implemented.")
pass
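# --- Illustrative sketch (added; not part of Kats) ---
# Shows the dict shapes ProphetParams expects and validate_params() enforces:
# custom seasonalities need "name"/"period"/"fourier_order" keys and extra
# regressors need "name"/"value" keys. The regressor values are hypothetical.
def _prophet_params_sketch(n_obs):
    params = ProphetParams(
        seasonality_mode="multiplicative",
        custom_seasonalities=[
            {"name": "monthly", "period": 30.5, "fourier_order": 5},
        ],
        extra_regressors=[
            {"name": "promo_flag", "value": [0.0] * n_obs},
        ],
    )
    params.validate_params()
    return params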
class ProphetModel(Model[ProphetParams]):
"""Model class for Prophet
This class provides fit, predict, and plot methods for Prophet model
Attributes:
data: the input time series data as in :class:`kats.consts.TimeSeriesData`
        params: the parameter class defined with `ProphetParams`
"""
model: Optional[Prophet] = None
freq: Optional[str] = None
def __init__(self, data: TimeSeriesData, params: ProphetParams) -> None:
super().__init__(data, params)
if _no_prophet:
raise RuntimeError("requires fbprophet to be installed")
if not isinstance(self.data.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
def fit(self, **kwargs: Any) -> None:
"""fit Prophet model
Args:
None.
Returns:
            None. The fitted Prophet model is stored as `self.model`.
"""
# prepare dataframe for Prophet.fit()
df = pd.DataFrame({"ds": self.data.time, "y": self.data.value})
logging.debug(
"Call fit() with parameters: "
"growth:{growth},"
"changepoints:{changepoints},"
"n_changepoints:{n_changepoints},"
"changepoint_range:{changepoint_range},"
"yearly_seasonality:{yearly_seasonality},"
"weekly_seasonality:{weekly_seasonality},"
"daily_seasonality:{daily_seasonality},"
"holidays:{holidays},"
"seasonality_mode:{seasonality_mode},"
"seasonality_prior_scale:{seasonality_prior_scale},"
"holidays_prior_scale:{holidays_prior_scale},"
"changepoint_prior_scale:{changepoint_prior_scale},"
"mcmc_samples:{mcmc_samples},"
"interval_width:{interval_width},"
"uncertainty_samples:{uncertainty_samples},"
"cap:{cap},"
"floor:{floor},"
"custom_seasonalities:{custom_seasonalities},"
"extra_regressors:{extra_regressors}".format(
growth=self.params.growth,
changepoints=self.params.changepoints,
n_changepoints=self.params.n_changepoints,
changepoint_range=self.params.changepoint_range,
yearly_seasonality=self.params.yearly_seasonality,
weekly_seasonality=self.params.weekly_seasonality,
daily_seasonality=self.params.daily_seasonality,
holidays=self.params.holidays,
seasonality_mode=self.params.seasonality_mode,
seasonality_prior_scale=self.params.seasonality_prior_scale,
holidays_prior_scale=self.params.holidays_prior_scale,
changepoint_prior_scale=self.params.changepoint_prior_scale,
mcmc_samples=self.params.mcmc_samples,
interval_width=self.params.interval_width,
uncertainty_samples=self.params.uncertainty_samples,
cap=self.params.cap,
floor=self.params.floor,
custom_seasonalities=self.params.custom_seasonalities,
extra_regressors=None
if self.params.extra_regressors is None
else [x["name"] for x in self.params.extra_regressors],
),
)
prophet = Prophet(
growth=self.params.growth,
changepoints=self.params.changepoints,
n_changepoints=self.params.n_changepoints,
changepoint_range=self.params.changepoint_range,
yearly_seasonality=self.params.yearly_seasonality,
weekly_seasonality=self.params.weekly_seasonality,
daily_seasonality=self.params.daily_seasonality,
holidays=self.params.holidays,
seasonality_mode=self.params.seasonality_mode,
seasonality_prior_scale=self.params.seasonality_prior_scale,
holidays_prior_scale=self.params.holidays_prior_scale,
changepoint_prior_scale=self.params.changepoint_prior_scale,
mcmc_samples=self.params.mcmc_samples,
interval_width=self.params.interval_width,
uncertainty_samples=self.params.uncertainty_samples,
)
if self.params.growth == "logistic":
# assign cap to a new col as Prophet required
df["cap"] = self.params.cap
# Adding floor if available
if self.params.floor is not None:
df["floor"] = self.params.floor
# Add any specified custom seasonalities.
for custom_seasonality in self.params.custom_seasonalities:
prophet.add_seasonality(**custom_seasonality)
# Add any extra regressors
if self.params.extra_regressors is not None:
for regressor in self.params.extra_regressors:
prophet.add_regressor(
**{k: v for k, v in regressor.items() if k not in ["value"]}
)
df[regressor["name"]] = pd.Series(regressor["value"], index=df.index)
self.model = prophet.fit(df=df)
logging.info("Fitted Prophet model. ")
def predict(
self, steps: int, *args: Any, include_history: bool = False, **kwargs: Any
) -> pd.DataFrame:
"""predict with fitted Prophet model
Args:
steps: the steps or length of prediction horizon
include_history: if include the historical data, default as False
Returns:
The predicted dataframe with following columns:
`time`, `fcst`, `fcst_lower`, and `fcst_upper`
"""
model = self.model
if model is None:
raise ValueError("Call fit() before predict().")
logging.debug(
"Call predict() with parameters. "
"steps:{steps}, kwargs:{kwargs}".format(steps=steps, kwargs=kwargs)
)
self.freq = kwargs.get("freq", pd.infer_freq(self.data.time))
self.include_history = include_history
# prepare future for Prophet.predict
future = kwargs.get("future")
raw = kwargs.get("raw", False)
if future is None:
future = model.make_future_dataframe(
periods=steps, freq=self.freq, include_history=self.include_history
)
if self.params.growth == "logistic":
# assign cap to a new col as Prophet required
future["cap"] = self.params.cap
if self.params.floor is not None:
future["floor"] = self.params.floor
extra_regressors = kwargs.get("extra_regressors", None)
if extra_regressors is not None:
for regressor in extra_regressors:
if not self.include_history:
future[regressor["name"]] = pd.Series(
regressor["value"], index=future.index
)
continue
# If history is required, it has to be pulled back from the
# parameter object and combined with future values
regressor_item = filter(
lambda x: x["name"] == regressor["name"],
self.params.extra_regressors,
).__next__()
regressor_value = pd.concat(
[ | pd.Series(regressor["value"]) | pandas.Series |
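# A minimal standalone sketch of the pattern above: the extra-regressor values
# seen during fit() are concatenated with the values supplied for the forecast
# horizon before being assigned onto the `future` frame. The series name
# "promo" and the numbers are placeholders, not taken from the snippet.
import pandas as pd

history_vals = pd.Series([1.0, 1.2, 1.1], name="promo")   # values used at fit time
future_vals = pd.Series([1.3, 1.4, 1.5], name="promo")    # values for the horizon
combined = pd.concat([history_vals, future_vals], ignore_index=True)
# `combined` can then be aligned to `future.index` and assigned as a column.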
import glob
import pandas as pd
def get_stats():
labels = glob.glob("../data/surgeons_annotations/*.csv")
result = pd.DataFrame(columns=["video_name", "two_structures_score_1", "two_structures_score_2",
"cystic_plate_score_1", "cystic_plate_score_2", "hc_triangle_score_1",
"hc_triangle_score_2"])
for l in labels:
video_labels = | pd.read_csv(l) | pandas.read_csv |
import ssl
import websocket
import logging
import ast
from data import save_data_store, get_data_store
import pandas as pd
context = ssl.create_default_context()
logger = logging.getLogger("app")
def collect_frequency(messages):
try:
df = get_data_store("frequency")
new_df = pd.DataFrame(messages)
category_count = new_df["category"].value_counts().rename_axis("category").reset_index(name="count")
category_count["publication_date"] = messages[0]["publication_date"]
merged = pd.concat([df, category_count]).groupby(['publication_date', 'category']).sum().reset_index()
logger.info(merged)
save_data_store(merged, "frequency", "category")
except Exception as e:
print(e)
logger.error(e)
def collect_top(messages):
try:
df = get_data_store("top10")
new_df = pd.DataFrame(messages)
assets_count = new_df["name"].value_counts().rename_axis("name").reset_index(name="count")
merged = | pd.concat([df, assets_count]) | pandas.concat |
"""
MIT License
Copyright (c) 2022 RandomWalkAlpha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
"""
import time
import json
import pandas as pd
import grequests
import requests
import re
from typing import List, Dict, Optional
from pandas import DataFrame, Series
class Collector:
def get_historical_data(self, code: str, start_date: str, end_date: str, freq: str):
raise NotImplementedError
def get_real_time_data(self, code: str):
raise NotImplementedError
def clean(self, record: List):
raise NotImplementedError
class DataCollector(Collector):
"""
This interface is designed to fetch raw stock data via public http(s) requests.
It currently collects data from Tencent, Sina, Netease and Hexun.
The returned data differs between providers, so users need to distinguish them themselves.
"""
def __init__(self, provider: str = 'T'):
"""
This init function selects the data provider to use.
:param provider: data provider: ['Tencent', 'Netease', 'Sina'], default: 'T' (for Tencent Stock)
"""
assert provider in ['T', 'N', 'S'], f"{provider} is not in the provider list."
self.provider = provider
self.url = None
self.requests = None
self.grequests = None
self.session = requests.Session()
self.raw_columns = ["交易所x", "股票名称x", "股票代码x", "现价", "昨收", "今开", "成交量", "外盘", "内盘",
"买一", "买一量", "买二", "买二量", "买三", "买三量", "买四", "买四量", "买五", "买五量",
"卖一", "卖一量", "卖二", "卖二量", "卖三", "卖三量", "卖四", "卖四量", "卖五", "卖五量",
"时间戳", "涨跌", "涨跌幅(%)", "最高", "最低", "现价/成交量(手)/成交额(元)x", "成交量x",
"成交额(万元)x", "换手率", "TTM市盈率", "最高x", "最低x", "振幅(%)", "流通市值", "总市值",
"LF市盈率", "涨停价", "跌停价", "量比", "A", "均价", "动态市盈率", "静态市盈率", "B", "成交额",
"nonex", "nonex", "nonex", "GP-Ax", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "x",
"x", "M", "N", "x", "买盘大单", "买盘小单", "卖盘大单", "卖盘小单"]
def get_historical_data(self, code: str, start_date: str, end_date: str, freq: str = 'day') -> Optional[DataFrame]:
"""
Return historical stock data, excluding the latest opening day.
:param code: stock code
:param start_date: data starts with this date, form: YYYY-mm-dd
:param end_date: data ends with this date, form: YYYY-mm-dd
:param freq: frequency: ['day', 'month'], default: 'day' (for day)
:return: a dataframe, including 6 basic stock factors (Date, Open, Close, High, Low, Volume)
"""
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90|sh00\d{4})|(sz\d{6})").match(code) is not None,\
f"Stock Code {code} is illegal."
assert time.strptime(start_date, '%Y-%m-%d') or time.strptime(end_date, '%Y-%m-%d')
assert freq in ['day', 'month'], f"{freq} doesn't belong to ['day', 'month']"
assert self.provider in ['T', 'N', 'S'], f"{self.provider} is not in the provider list."
columns = ["日期", "开盘价", "收盘价", "最高价", "最低价", "成交量(手)"]
clean_data = []
if self.provider == 'T':
self.url = "https://web.ifzq.gtimg.cn/appstock/app/fqkline/get?"
days = int(time.mktime(time.strptime(end_date, '%Y-%m-%d'))
- time.mktime(time.strptime(start_date, '%Y-%m-%d'))) // 86400
params = f"param={code},{freq},{start_date},{end_date},{days},qfq"
self.requests = self.session.get(f"{self.url}{params}")
data = json.loads(self.requests.text)
try:
data = data["data"][code][f"qfq{freq}"]
except KeyError:
data = data["data"][code][f"{freq}"]
for _data in data:
if len(_data) > 6:
_data = _data[: 6]
clean_data.append(_data)
return pd.DataFrame(clean_data, columns=columns)
else:
assert freq == 'day', f"Netease only supports freq: 'day'."
self.url = "https://quotes.money.163.com/service/chddata.html?"
start_date = start_date.replace('-', '')
end_date = end_date.replace('-', '')
if re.compile(r"sh68|sh60|sh90|sh00\d{4}").match(code):
code = '0' + code[2:]
else:
code = '1' + code[2:]
params = f'code={code}&start={start_date}&end={end_date}' \
f'&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;VOTURNOVER'
self.requests = self.session.get(f"{self.url}{params}")
raw_data = self.requests.text.split('\r\n')
columns = raw_data[0].split(',')
raw_data = raw_data[1: -1]
df = pd.DataFrame(columns=columns)
for line in raw_data:
record = line.split(',')
df = df.append(pd.Series(record, index=columns), ignore_index=True)
return df
def get_batch_historical_data(self, code_list: List, start_date: str, end_date: str, freq: str = 'day') -> Dict:
"""
Return a batch of historical trading data for the stock codes in the list.
:param code_list: stock code list
:param start_date: data starts with this date, form: YYYY-mm-dd
:param end_date: data ends with this date, form: YYYY-mm-dd
:param freq: frequency: ['day', 'month'], default: 'day' (for day)
:return: a dict of Dataframe
"""
assert time.strptime(start_date, '%Y-%m-%d') or time.strptime(end_date, '%Y-%m-%d')
assert freq in ['day', 'month'], f"{freq} doesn't belong to ['day', 'month']"
assert self.provider in ['T', 'N', 'S'], f"{self.provider} is not in the provider list."
if self.provider == 'T':
self.url = "https://web.ifzq.gtimg.cn/appstock/app/fqkline/get?"
df_dict = {}
request_list = []
columns = ["日期", "开盘价", "收盘价", "最高价", "最低价", "成交量(手)"]
for code in code_list:
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90|sh00\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
days = int(time.mktime(time.strptime(end_date, '%Y-%m-%d'))
- time.mktime(time.strptime(start_date, '%Y-%m-%d'))) // 86400
params = f"param={code},{freq},{start_date},{end_date},{days},qfq"
request_list.append(grequests.get(f"{self.url}{params}", session=self.session))
self.requests = grequests.map(request_list)
for req, code in zip(self.requests, code_list):
clean_data = []
try:
data = json.loads(req.text)["data"][code.lower()][f"qfq{freq}"]
except KeyError:
data = json.loads(req.text)["data"][code.lower()][f"{freq}"]
for _data in data:
if len(_data) > 6:
_data = _data[: 6]
clean_data.append(_data)
df_dict[code] = pd.DataFrame(clean_data, columns=columns)
return df_dict
else:
assert freq == 'day', f"Netease only supports freq: 'day'."
self.url = "https://quotes.money.163.com/service/chddata.html?"
start_date = start_date.replace('-', '')
end_date = end_date.replace('-', '')
df_dict = {}
request_list = []
for code in code_list:
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90|sh00\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
if re.compile(r"sh68|sh60|sh90|sh00\d{4}").match(code):
code = '0' + code[2:]
else:
code = '1' + code[2:]
params = f'code={code}&start={start_date}&end={end_date}' \
f'&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;VOTURNOVER'
request_list.append(grequests.get(f"{self.url}{params}", session=self.session))
print(f"{self.url}{params}")
self.requests = grequests.map(request_list)
for req, code in zip(self.requests, code_list):
raw_data = req.text.split('\r\n')
columns = raw_data[0].split(',')
raw_data = raw_data[1: -1]
df = pd.DataFrame(columns=columns)
for line in raw_data:
record = line.split(',')
df = df.append(pd.Series(record, index=columns), ignore_index=True)
df_dict[code] = df
return df_dict
def get_real_time_data(self, code: str) -> Optional[Series]:
"""
Return real-time trading data for a specific stock; this interface can be called at most once every 5 seconds.
If the current time is outside the trading session, the latest valid data is returned.
If you want to get data for more than one stock at a time, please use get_batch_real_time_data() instead.
:param code: stock code, eg: SH600519, SZ399001
:return: latest stock data string
"""
assert self.provider == 'T', "Only Tencent Stock interface is supported!"
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
self.url = "https://qt.gtimg.cn/q="
self.requests = self.session.get(f"{self.url}{code},s_pk{code}")
return self.clean(self.requests.text[: -2]) if 'v_pv_none_match' not in self.requests.text else None
def get_batch_real_time_data(self, code_list: List) -> Optional[Dict]:
"""
Return a batch of real-time trading data for the stock codes in the list.
:param code_list: stock code list
:return: latest stock data dict
"""
assert self.provider == 'T', "Only Tencent Stock interface is supported!"
self.url = "https://qt.gtimg.cn/q="
request_list = []
data_dict = {}
for code in code_list:
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
request_list.append(grequests.get(f"{self.url}{code},s_pk{code}", session=self.session))
self.requests = grequests.map(request_list)
for code, req in zip(code_list, self.requests):
data_dict[code] = self.clean(req.text[: -2]) if "v_pv_none_match" not in req.text else None
return data_dict
def get_minute_data(self, code) -> Optional[DataFrame]:
"""
Get brief minute-level stock data for the last trading day
:param code: stock code
:return: dataframe
"""
assert self.provider == 'T', "Only Tencent Stock interface is supported!"
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90|sh00\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
self.url = f"https://web.ifzq.gtimg.cn/appstock/app/minute/query?code={code}"
self.requests = self.session.get(self.url)
data = json.loads(self.requests.text)
columns = ["时间戳", "现价", "累计成交量", "现成交量"]
df = pd.DataFrame(columns=columns)
if data["code"] == -1:
return df
try:
data = data["data"][f"{code}"]["data"]["data"]
except KeyError:
return df
if data[0] == " 0":
return df
else:
for record in data:
record = record.split(' ')
df = df.append(pd.Series(record, index=columns), ignore_index=True)
return df
def get_transaction_detail(self, code: str, start_date: str, end_date: str) -> Optional[DataFrame]:
assert self.provider == 'S', "Only Sina Stock interface is supported!"
code = code.lower()
assert len(code) == 8 and re.compile(r"(sh68|sh60|sh90|sh00\d{4})|(sz\d{6})").match(code) is not None, \
f"Stock Code {code} is illegal."
assert time.strptime(start_date, '%Y-%m-%d') or time.strptime(end_date, '%Y-%m-%d')
days = int(time.mktime(time.strptime(end_date, '%Y-%m-%d'))
- time.mktime(time.strptime(start_date, '%Y-%m-%d'))) // 86400
assert days < 270, "Query date range is too large, please narrow it to within 10 months."
self.url = "https://market.finance.sina.com.cn/pricehis.php?"
params = f"symbol={code}&startdate={start_date}&enddate={end_date}"
self.requests = self.session.get(f"{self.url}{params}")
data = re.findall(r'>[0-9].*<', self.requests.text)
columns = ["成交价(元)", "成交量(股)", "占比"]
df = pd.DataFrame(columns=columns)
for element in range(0, len(data), 3):
val_1 = float(data[element][1: -1].replace(',', ''))
val_2 = int(data[element + 1][1: -1])
val_3 = float(data[element + 2][1: -2])
df = df.append( | pd.Series([val_1, val_2, val_3], index=columns) | pandas.Series |
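# The loops above grow DataFrames row by row with DataFrame.append, which was
# deprecated in pandas 1.4 and removed in 2.0. A sketch of the same idea that
# collects plain records first and builds the frame once; the numbers are
# placeholders, not real quotes.
import pandas as pd

columns = ["成交价(元)", "成交量(股)", "占比"]
records = []
for val_1, val_2, val_3 in [(10.5, 200, 1.2), (10.6, 150, 0.9)]:
    records.append(dict(zip(columns, (val_1, val_2, val_3))))
df = pd.DataFrame(records, columns=columns)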
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import joblib
import pandas as pd
import numpy as np
from azureml.core.model import Model
from azureml.core.run import Run
# 0.0 Parse input arguments
parser = argparse.ArgumentParser("split")
parser.add_argument("--timestamp_column", type=str, help="timestamp column from data", required=True)
parser.add_argument("--timeseries_id_columns", type=str, nargs='*', required=True,
help="input columns identifying the timeseries")
parser.add_argument("--model_type", type=str, help="model type", required=True)
args, _ = parser.parse_known_args()
current_run = None
def init():
global current_run
current_run = Run.get_context()
def run(input_data):
# 1.0 Set up results dataframe
results = []
print('here')
# 2.0 Iterate through input data
for csv_file_path in input_data:
# 3.0 Set up data to predict on
data = ( | pd.read_csv(csv_file_path, parse_dates=[args.timestamp_column], header=0) | pandas.read_csv |
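# A minimal standalone sketch of the read above: parse_dates converts the
# timestamp column to datetime while the CSV is being read. The file name
# "sales.csv" and the column name "timestamp" are placeholders.
import pandas as pd

data = pd.read_csv("sales.csv", parse_dates=["timestamp"], header=0)
data = data.set_index("timestamp").sort_index()  # typical follow-up before forecasting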
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import math
import warnings
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import model_selection, svm, tree
from sklearn.cross_decomposition import PLSRegression
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, DotProduct, WhiteKernel, RBF, ConstantKernel
from sklearn.linear_model import Ridge, Lasso, ElasticNet, ElasticNetCV
from sklearn.model_selection import GridSearchCV
warnings.filterwarnings('ignore')
regression_method = 'pls' # 'pls' or 'rr' or 'lasso' or 'en' or 'lsvr' or 'nsvr' or 'dt' or 'rf' or 'gp'
max_pca_component_number = 150
threshold_of_rate_of_same_value = 1
fold_number = 2
max_pls_component_number = 30
ridge_lambdas = 2 ** np.arange(-5, 10, dtype=float) # L2 weight in ridge regression
lasso_lambdas = np.arange(0.01, 0.71, 0.01, dtype=float) # L1 weight in LASSO
elastic_net_lambdas = np.arange(0.01, 0.71, 0.01, dtype=float) # Lambda in elastic net
elastic_net_alphas = np.arange(0.01, 1.00, 0.01, dtype=float) # Alpha in elastic net
linear_svr_cs = 2 ** np.arange(-5, 5, dtype=float) # C for linear svr
linear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # Epsilon for linear svr
nonlinear_svr_cs = 2 ** np.arange(-5, 10, dtype=float) # C for nonlinear svr
nonlinear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # Epsilon for nonlinear svr
nonlinear_svr_gammas = 2 ** np.arange(-20, 10, dtype=float) # Gamma for nonlinear svr
dt_max_max_depth = 30 # upper limit on the maximum tree depth
dt_min_samples_leaf = 3 # minimum number of samples per leaf
random_forest_number_of_trees = 300 # Number of decision trees for random forest
random_forest_x_variables_rates = np.arange(1, 10,
dtype=float) / 10 # Ratio of the number of X-variables for random forest
# load data set
supervised_dataset = pd.read_csv('descriptors_with_logS.csv', encoding='SHIFT-JIS', index_col=0)
unsupervised_dataset = pd.read_csv('descriptors_for_prediction.csv', encoding='SHIFT-JIS', index_col=0)
number_of_supervised_samples = supervised_dataset.shape[0]
x_all_dataset = pd.concat([supervised_dataset.iloc[:, 1:], unsupervised_dataset], axis=0)
x_all_dataset = x_all_dataset.loc[:, x_all_dataset.mean().index] # keep only variables whose mean can be computed
x_all_dataset = x_all_dataset.replace(np.inf, np.nan).fillna(np.nan) # replace inf with nan first
x_all_dataset = x_all_dataset.dropna(axis=1) # drop variables that contain nan
y_train = supervised_dataset.iloc[:, 0]
rate_of_same_value = list()
num = 0
for X_variable_name in x_all_dataset.columns:
num += 1
# print('{0} / {1}'.format(num, x_all_dataset.shape[1]))
same_value_number = x_all_dataset[X_variable_name].value_counts()
rate_of_same_value.append(float(same_value_number[same_value_number.index[0]] / x_all_dataset.shape[0]))
deleting_variable_numbers = np.where(np.array(rate_of_same_value) >= threshold_of_rate_of_same_value)
"""
# delete descriptors with zero variance
deleting_variable_numbers = np.where( raw_Xtrain.var() == 0 )
"""
if len(deleting_variable_numbers[0]) == 0:
x_all = x_all_dataset.copy()
else:
x_all = x_all_dataset.drop(x_all_dataset.columns[deleting_variable_numbers], axis=1)
print('Variable numbers zero variance: {0}'.format(deleting_variable_numbers[0] + 1))
print('# of X-variables: {0}'.format(x_all.shape[1]))
# autoscaling
autoscaled_x_all = (x_all - x_all.mean(axis=0)) / x_all.std(axis=0, ddof=1)
autoscaled_y_train = (y_train - y_train.mean(axis=0)) / y_train.std(axis=0, ddof=1)
# PCA
pca = PCA() # declare pca, the object that runs PCA and stores its results
pca.fit(autoscaled_x_all) # run PCA
# score
score_all = pd.DataFrame(pca.transform(autoscaled_x_all), index=x_all.index) # compute the principal component scores, then convert them to a pandas DataFrame
score_train = score_all.iloc[:number_of_supervised_samples, :]
score_test = score_all.iloc[number_of_supervised_samples:, :]
# scaling
autoscaled_score_train = score_train / score_train.std(axis=0, ddof=1)
autoscaled_score_test = score_test / score_train.std(axis=0, ddof=1)
# optimization of number of PCs
set_max_pca_component_number = min(np.linalg.matrix_rank(autoscaled_score_train), max_pca_component_number)
r2cvs = []
for number_of_pcs in range(set_max_pca_component_number):
print('PC:', number_of_pcs + 1, '/', set_max_pca_component_number)
autoscaled_x_train = autoscaled_score_train.iloc[:, :number_of_pcs + 1]
if regression_method == 'pls': # Partial Least Squares
pls_components = np.arange(1, min(np.linalg.matrix_rank(autoscaled_x_train) + 1, max_pls_component_number + 1),
1)
r2cvall = []
for pls_component in pls_components:
pls_model_in_cv = PLSRegression(n_components=pls_component)
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(pls_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_pls_component_number = np.where(r2cvall == np.max(r2cvall))[0][0] + 1
regression_model = PLSRegression(n_components=optimal_pls_component_number)
elif regression_method == 'rr': # ridge regression
r2cvall = list()
for ridge_lambda in ridge_lambdas:
rr_model_in_cv = Ridge(alpha=ridge_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(rr_model_in_cv, autoscaled_x_train,
autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_ridge_lambda = ridge_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Ridge(alpha=optimal_ridge_lambda)
elif regression_method == 'lasso': # LASSO
r2cvall = list()
for lasso_lambda in lasso_lambdas:
lasso_model_in_cv = Lasso(alpha=lasso_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(lasso_model_in_cv, autoscaled_x_train,
autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_lasso_lambda = lasso_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Lasso(alpha=optimal_lasso_lambda)
elif regression_method == 'en': # Elastic net
elastic_net_in_cv = ElasticNetCV(cv=fold_number, l1_ratio=elastic_net_lambdas, alphas=elastic_net_alphas)
elastic_net_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_elastic_net_alpha = elastic_net_in_cv.alpha_
optimal_elastic_net_lambda = elastic_net_in_cv.l1_ratio_
regression_model = ElasticNet(l1_ratio=optimal_elastic_net_lambda, alpha=optimal_elastic_net_alpha)
elif regression_method == 'lsvr': # Linear SVR
linear_svr_in_cv = GridSearchCV(svm.SVR(kernel='linear'), {'C': linear_svr_cs, 'epsilon': linear_svr_epsilons},
cv=fold_number)
linear_svr_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_linear_svr_c = linear_svr_in_cv.best_params_['C']
optimal_linear_svr_epsilon = linear_svr_in_cv.best_params_['epsilon']
regression_model = svm.SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon)
elif regression_method == 'nsvr': # Nonlinear SVR
variance_of_gram_matrix = list()
numpy_autoscaled_Xtrain = np.array(autoscaled_x_train)
for nonlinear_svr_gamma in nonlinear_svr_gammas:
gram_matrix = np.exp(
-nonlinear_svr_gamma * ((numpy_autoscaled_Xtrain[:, np.newaxis] - numpy_autoscaled_Xtrain) ** 2).sum(
axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_nonlinear_gamma = nonlinear_svr_gammas[
np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]
# optimize ε via cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma),
{'epsilon': nonlinear_svr_epsilons},
cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']
# optimize C via cross-validation
model_in_cv = GridSearchCV(
svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),
{'C': nonlinear_svr_cs}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_c = model_in_cv.best_params_['C']
# optimize γ via cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),
{'gamma': nonlinear_svr_gammas}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_gamma = model_in_cv.best_params_['gamma']
regression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,
gamma=optimal_nonlinear_gamma)
elif regression_method == 'dt': # Decision tree
# optimize the tree depth via cross-validation
r2cv_all = []
for max_depth in range(2, dt_max_max_depth):
model_in_cv = tree.DecisionTreeRegressor(max_depth=max_depth, min_samples_leaf=dt_min_samples_leaf)
estimated_y_in_cv = model_selection.cross_val_predict(model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number) * y_train.std(ddof=1) + y_train.mean()
r2cv_all.append(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))
optimal_max_depth = np.where(r2cv_all == np.max(r2cv_all))[0][0] + 2 # tree depth with the largest r2cv
regression_model = tree.DecisionTreeRegressor(max_depth=optimal_max_depth,
min_samples_leaf=dt_min_samples_leaf) # declare the DT model
elif regression_method == 'rf': # Random forest
rmse_oob_all = list()
for random_forest_x_variables_rate in random_forest_x_variables_rates:
RandomForestResult = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * random_forest_x_variables_rate), 1)), oob_score=True)
RandomForestResult.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_in_cv = RandomForestResult.oob_prediction_
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
rmse_oob_all.append((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)
optimal_random_forest_x_variables_rate = random_forest_x_variables_rates[
np.where(rmse_oob_all == np.min(rmse_oob_all))[0][0]]
regression_model = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * optimal_random_forest_x_variables_rate), 1)), oob_score=True)
elif regression_method == 'gp': # Gaussian process
regression_model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel())
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(regression_model, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvs.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
plt.rcParams['font.size'] = 18 # font size for axis labels and tick text
plt.plot(np.arange(set_max_pca_component_number) + 1, r2cvs, 'b.-')
plt.ylim(0, 1)
plt.xlabel('Number of PCA components')
plt.ylabel('r2cv')
plt.show()
optimal_pca_component_number = np.where(r2cvs == np.max(r2cvs))[0][0] + 1
print('Optimal PCA component number : {0}'.format(optimal_pca_component_number))
autoscaled_x_train = autoscaled_score_train.iloc[:, :optimal_pca_component_number]
autoscaled_x_test = autoscaled_score_test.iloc[:, :optimal_pca_component_number]
if regression_method == 'pls': # Partial Least Squares
pls_components = np.arange(1, min(np.linalg.matrix_rank(autoscaled_x_train) + 1, max_pls_component_number + 1), 1)
r2cvall = []
for pls_component in pls_components:
pls_model_in_cv = PLSRegression(n_components=pls_component)
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(pls_model_in_cv, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_pls_component_number = np.where(r2cvall == np.max(r2cvall))[0][0] + 1
regression_model = PLSRegression(n_components=optimal_pls_component_number)
elif regression_method == 'rr': # ridge regression
r2cvall = list()
for ridge_lambda in ridge_lambdas:
rr_model_in_cv = Ridge(alpha=ridge_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(rr_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_ridge_lambda = ridge_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Ridge(alpha=optimal_ridge_lambda)
elif regression_method == 'lasso': # LASSO
r2cvall = list()
for lasso_lambda in lasso_lambdas:
lasso_model_in_cv = Lasso(alpha=lasso_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(lasso_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_lasso_lambda = lasso_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Lasso(alpha=optimal_lasso_lambda)
elif regression_method == 'en': # Elastic net
elastic_net_in_cv = ElasticNetCV(cv=fold_number, l1_ratio=elastic_net_lambdas, alphas=elastic_net_alphas)
elastic_net_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_elastic_net_alpha = elastic_net_in_cv.alpha_
optimal_elastic_net_lambda = elastic_net_in_cv.l1_ratio_
regression_model = ElasticNet(l1_ratio=optimal_elastic_net_lambda, alpha=optimal_elastic_net_alpha)
elif regression_method == 'lsvr': # Linear SVR
linear_svr_in_cv = GridSearchCV(svm.SVR(kernel='linear'), {'C': linear_svr_cs, 'epsilon': linear_svr_epsilons},
cv=fold_number)
linear_svr_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_linear_svr_c = linear_svr_in_cv.best_params_['C']
optimal_linear_svr_epsilon = linear_svr_in_cv.best_params_['epsilon']
regression_model = svm.SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon)
elif regression_method == 'nsvr': # Nonlinear SVR
variance_of_gram_matrix = list()
numpy_autoscaled_Xtrain = np.array(autoscaled_x_train)
for nonlinear_svr_gamma in nonlinear_svr_gammas:
gram_matrix = np.exp(
-nonlinear_svr_gamma * ((numpy_autoscaled_Xtrain[:, np.newaxis] - numpy_autoscaled_Xtrain) ** 2).sum(
axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_nonlinear_gamma = nonlinear_svr_gammas[
np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]
# optimize ε via cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma),
{'epsilon': nonlinear_svr_epsilons},
cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']
# optimize C via cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),
{'C': nonlinear_svr_cs}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_c = model_in_cv.best_params_['C']
# optimize γ via cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),
{'gamma': nonlinear_svr_gammas}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_gamma = model_in_cv.best_params_['gamma']
regression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,
gamma=optimal_nonlinear_gamma)
elif regression_method == 'dt': # Decision tree
# optimize the tree depth via cross-validation
r2cv_all = []
for max_depth in range(2, dt_max_max_depth):
model_in_cv = tree.DecisionTreeRegressor(max_depth=max_depth, min_samples_leaf=dt_min_samples_leaf)
estimated_y_in_cv = model_selection.cross_val_predict(model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number) * y_train.std(ddof=1) + y_train.mean()
r2cv_all.append(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))
optimal_max_depth = np.where(r2cv_all == np.max(r2cv_all))[0][0] + 2 # tree depth with the largest r2cv
regression_model = tree.DecisionTreeRegressor(max_depth=optimal_max_depth,
min_samples_leaf=dt_min_samples_leaf) # declare the DT model
elif regression_method == 'rf': # Random forest
rmse_oob_all = list()
for random_forest_x_variables_rate in random_forest_x_variables_rates:
RandomForestResult = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * random_forest_x_variables_rate), 1)), oob_score=True)
RandomForestResult.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_in_cv = RandomForestResult.oob_prediction_
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
rmse_oob_all.append((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)
optimal_random_forest_x_variables_rate = random_forest_x_variables_rates[
np.where(rmse_oob_all == np.min(rmse_oob_all))[0][0]]
regression_model = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * optimal_random_forest_x_variables_rate), 1)), oob_score=True)
elif regression_method == 'gp': # Gaussian process
regression_model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel())
regression_model.fit(autoscaled_x_train, autoscaled_y_train)
# calculate y for training data
calculated_ytrain = np.ndarray.flatten(regression_model.predict(autoscaled_x_train))
calculated_ytrain = calculated_ytrain * y_train.std(ddof=1) + y_train.mean()
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, calculated_ytrain)
y_max = np.max(np.array([np.array(y_train), calculated_ytrain]))
y_min = np.min(np.array([np.array(y_train), calculated_ytrain]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Calculated Y')
plt.show()
# r2, RMSE, MAE
print('r2: {0}'.format(float(1 - sum((y_train - calculated_ytrain) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSE: {0}'.format(float((sum((y_train - calculated_ytrain) ** 2) / len(y_train)) ** 0.5)))
print('MAE: {0}'.format(float(sum(abs(y_train - calculated_ytrain)) / len(y_train))))
# estimated_y in cross-validation
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(regression_model, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, estimated_y_in_cv)
y_max = np.max(np.array([np.array(y_train), estimated_y_in_cv]))
y_min = np.min(np.array([np.array(y_train), estimated_y_in_cv]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y in CV')
plt.show()
# r2cv, RMSEcv, MAEcv
print('r2cv: {0}'.format(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSEcv: {0}'.format(float((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)))
print('MAEcv: {0}'.format(float(sum(abs(y_train - estimated_y_in_cv)) / len(y_train))))
# estimate y for test data
autoscaled_x_test = np.ndarray.flatten(regression_model.predict(autoscaled_x_test))
autoscaled_x_test = autoscaled_x_test * y_train.std(ddof=1) + y_train.mean()
autoscaled_x_test = | pd.DataFrame(autoscaled_x_test, index=unsupervised_dataset.index, columns=['estimated y']) | pandas.DataFrame |
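# The r2 / RMSE / MAE figures printed above are computed by hand from the
# residuals; scikit-learn offers equivalent helpers, sketched here with
# placeholder arrays rather than the script's own predictions.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.1, 1.9, 3.2])
print('r2: {0}'.format(r2_score(y_true, y_pred)))
print('RMSE: {0}'.format(mean_squared_error(y_true, y_pred) ** 0.5))
print('MAE: {0}'.format(mean_absolute_error(y_true, y_pred)))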
# @Author: <NAME>
# @Date: Tue, March 31st 2020, 12:34 am
# @Email: <EMAIL>
# @Filename: base_dataset.py
'''
Script for defining base class BaseDataset for managing information about a particular subset or collection of datasets during preparation for a particular experiment.
'''
from boltons.dictutils import OneToOne
from collections import OrderedDict
import dataset
import json
import numpy as np
import os
import pandas as pd
import random
from stuf import stuf
from toolz.itertoolz import frequencies
from pyleaves import leavesdb
import pyleaves
from pyleaves.tests.test_utils import MetaData
from typing import List
class BaseDataset(object):
__version__ = '0.1'
def __init__(self, name='', src_db=pyleaves.DATABASE_PATH, columns = ['path','family','catalog_number'], id_col=None):
"""
Base class meant to be subclassed for unique named datasets. Implements some property setters/getters for maintaining consistency
of data and filters (like min class count threshold).
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>> dataset = BaseDataset()
>>> leaves_dataset = LeavesDataset()
... fossil_dataset = FossilDataset()
... pnas_dataset = PNASDataset()
... pnas_fossil_data = pnas_data+fossil_data
... pnas_leaves_data = pnas_data+leaves_data
>>>
"""
self.name = name
self.columns = columns
self.x_col = 'path'
self.y_col = 'family'
self.id_col = id_col
if src_db:
self.local_db = leavesdb.init_local_db(src_db = src_db, verbose=False)
self._threshold = 0
self._data = | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
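# The docstring above mentions a minimum-class-count threshold filter. How that
# filter is wired into the class is not shown here, so the helper below is only
# an assumption of what such a filter might look like; the function name is
# hypothetical.
import pandas as pd

def filter_by_min_count(df: pd.DataFrame, label_col: str, threshold: int) -> pd.DataFrame:
    """Keep only rows whose class has at least `threshold` samples."""
    counts = df[label_col].value_counts()
    keep = counts[counts >= threshold].index
    return df[df[label_col].isin(keep)]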
import pandas as pd
import pickle
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.svm import SVR
from evaluation_dir import evaluation_metrics
from split_train_test import convert_label
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn import svm
def replace_value_dataframe(df):
df = df.replace({True: 1, False: 0})
df = df.fillna(df.mean())
return df.values
def load_features(ids, path_data):
df = | pd.read_csv(path_data) | pandas.read_csv |
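# A minimal standalone sketch of what replace_value_dataframe() does above:
# booleans become 0/1 and remaining gaps are filled with column means. The
# column names and values are placeholders.
import pandas as pd

df = pd.DataFrame({"fix": [True, False, True], "loc": [10.0, None, 30.0]})
df = df.replace({True: 1, False: 0})
df = df.fillna(df.mean())
values = df.values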
# coding: utf-8
# In[1]:
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import tensorflow as tf
import sys
start_idx = 1
bulk = 1
if len(sys.argv) == 3:
start_idx = int(sys.argv[1])
bulk = int(sys.argv[2])
# length of the pattern to look for in a day's activity sequence
n_size = 4
seq_length = n_size
data_dir = '/export/data/cert-data/processed/4.2/'
flag_additional_loss = True
# for ground-truth
self_user_dict = pd.read_csv(data_dir + 'dictionary.csv', sep=',')
self_user_dict = self_user_dict['user']
self_answer = | pd.read_csv(data_dir + 'answer_r4.2_all.csv', sep=',') | pandas.read_csv |
#!/bin/env python
# -*- coding: utf-8 -*-
#
# PROGRAMMER: <NAME>
# DATE CREATED: 01/12/2020
# REVISED DATE:
# PURPOSE: Create a Classifier class along with methods for compiling, training,
# evaluating, saving and loading the model.
#
##
# Imports python modules
from utility import process_image
import os
from time import time
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import torch
import torchvision.models as models
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
# the following import is required for training to be robust to truncated images
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Set the models to use
densenet121 = models.densenet121(pretrained=True)
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg16 = models.vgg16(pretrained=True)
googlenet = models.googlenet(pretrained=True)
inception = models.inception_v3(pretrained=True, aux_logits=False)
models = {'inception': inception, 'googlenet': googlenet, 'densenet': densenet121, 'resnet': resnet18,
'alexnet': alexnet, 'vgg': vgg16}
class Classifier():
def __init__(self, device):
"""
Initialize the classifier class with the needed parameters for it's methods
param: model(str): model name to use
param: device: the device to use for training, or predicting CPU or GPU
"""
self.device = device
# initialize tracker for minimum validation loss
self.valid_loss_min = np.Inf
# Initialize empty lists to track training and validation losses and accuracy for each epoch
self.train_losses = []
self.valid_losses = []
self.train_acc = []
self.valid_acc = []
# Initialize start epoch
self.start_epoch = 1
# Initialize cat to name file with None
self.cat_to_name = None
def _create_classifier(self, n_inputs, n_outputs, hidden_units=[512], drop_p=0.5):
"""
Create a classifier to use for the model
param: n_inputs (int): number of input features to use
param: n_outputs (int): number of output features to use
param: hidden_units (list): a list of integers to use as hidden units
param: drop_p (float): a number between 0 and 1 to be used as the probability for dropout
"""
classifier = nn.ModuleList()
classifier.append(nn.Linear(n_inputs, hidden_units[0]))
classifier.append(nn.ReLU())
classifier.append(nn.Dropout(drop_p))
if len(hidden_units) > 1:
for (h1, h2) in zip(hidden_units[:-1], hidden_units[1:]):
classifier.append(nn.Linear(h1, h2))
classifier.append(nn.ReLU())
classifier.append(nn.Dropout(drop_p))
classifier.append(nn.Linear(hidden_units[-1], n_outputs))
return nn.Sequential(*classifier)
def compile(self, n_outputs, arch='vgg', hidden_units=[512], drop_p=0.5, learning_rate=0.01, weights=None):
"""
Compile the model and prepare it for training by setting the model architecture,
number of hidden layers to use, number of hidden units for each layer, dropout,
optimizer, learning rate and weights
param: n_outputs (int): number of output classes
param: arch (str): the name of the architecture to use
param: hidden_units (list): a list with the number of hidden units to use for each hidden layer
param: drop_p (float): a number between 0 and 1 to be used as the probability for dropout
param: learning_rate (float): a number between 0 and 1 to be used as the optimizer learning rate
param: weights (tensor): a tensor with class weights to be used for the criterion
"""
# Create a model
self.model = models[arch]
self.arch = arch
# Freezing model parameters
for param in self.model.parameters():
param.requires_grad = False
# Get the input features for the model classifier layer
if arch == 'vgg':
n_inputs = self.model.classifier[0].in_features
elif arch == 'alexnet':
n_inputs = self.model.classifier[1].in_features
elif arch == 'densenet':
n_inputs = self.model.classifier.in_features
elif arch in ['resnet', 'googlenet', 'inception']:
n_inputs = self.model.fc.in_features
else:
print(
f'{arch} is not available, please choose an architecture from {models.keys()}')
# Create a sequential model to use as a classifier
self.classifier = self._create_classifier(
n_inputs=n_inputs, n_outputs=n_outputs, hidden_units=hidden_units, drop_p=drop_p)
# Replace the model's classifier with the new classifier sequential layer
if arch in ['resnet', 'googlenet', 'inception']:
self.model.fc = self.classifier
else:
self.model.classifier = self.classifier
# Move model to GPU if available
self.model = self.model.to(self.device)
# Create criterion object
self.criterion = nn.CrossEntropyLoss(weight=weights.to(self.device))
# Create optimizer
if arch in ['resnet', 'googlenet', 'inception']:
self.optimizer = optim.SGD(
self.model.fc.parameters(), lr=learning_rate)
else:
self.optimizer = optim.SGD(
self.model.classifier.parameters(), lr=learning_rate)
def train(self, n_epochs, loaders, image_datasets, early_stopping=None):
"""
Train the model and keep the best model weights (lowest validation loss)
param: n_epochs (int): number of epochs to train for
param: loaders: a dictionary that contains train, validation and test data loaders
param: image_datasets: a dictionary that contains ImageFolder objects for train, valid and test
param: early_stopping (int): number of epochs without validation-loss improvement after which training stops
"""
# Start time to calculate the time for training
print()
print('='*50)
print('Training ......')
train_start = time()
# Setting early stopping count
early_stopping_count = 0
# Setting model's best weights
self.best_weights = self.model.state_dict()
end_epoch = n_epochs + self.start_epoch - 1
for epoch in range(self.start_epoch, end_epoch + 1):
with tqdm(total=len(image_datasets['train'])) as t_epoch_pbar:
t_epoch_pbar.set_description(f'Train-> E({epoch}/{end_epoch})')
# Start time for epoch
# epoch_start = time()
# initialize variables to monitor training and validation loss
train_loss = 0.0
valid_loss = 0.0
train_correct = 0.0
train_total = 0.0
valid_correct = 0.0
valid_total = 0.0
###################
# train the model #
###################
self.model.train()
for batch_idx, (data, target) in enumerate(loaders['train']):
# move to GPU if available
data, target = data.to(self.device), target.to(self.device)
# find the loss and update the model parameters accordingly
# clear the gradients of all optimized variables
self.optimizer.zero_grad()
# forward pass
output = self.model(data)
# calculate the batch loss
loss = self.criterion(output, target)
# backward pass
loss.backward()
# perform a single optimization step to update model parameters
self.optimizer.step()
# update training loss
train_loss = train_loss + \
((1 / (batch_idx + 1)) * (loss.data - train_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
train_correct += np.sum(np.squeeze(
pred.eq(target.data.view_as(pred))).cpu().numpy())
train_total += data.size(0)
# Update the progress bar
desc = f'Train-> E({epoch}/{end_epoch}) - loss={train_loss:.4f} - Acc={train_correct/train_total:.2%}'
t_epoch_pbar.set_description(desc)
t_epoch_pbar.update(data.shape[0])
######################
# validate the model #
######################
self.model.eval()
with tqdm(total=len(image_datasets['valid'])) as v_epoch_pbar:
v_epoch_pbar.set_description(f'Valid-> E({epoch}/{end_epoch})')
for batch_idx, (data, target) in enumerate(loaders['valid']):
# move to GPU if available
data, target = data.to(self.device), target.to(self.device)
# update the average validation loss
# forward pass
output = self.model(data)
# calculate the batch loss
loss = self.criterion(output, target)
# update average validation loss
valid_loss = valid_loss + \
((1 / (batch_idx + 1)) * (loss.data - valid_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
valid_correct += np.sum(np.squeeze(
pred.eq(target.data.view_as(pred))).cpu().numpy())
valid_total += data.size(0)
# Update the progress bar
desc = f'Valid-> E({epoch}/{end_epoch}) - loss={valid_loss:.4f} - Acc={valid_correct/(valid_total+1e-10):.2%}'
v_epoch_pbar.set_description(desc)
v_epoch_pbar.update(data.shape[0])
# Add train and valid loss for each epoch to the train_losses and valid_losses lists
self.train_losses.append(train_loss.cpu().numpy())
self.valid_losses.append(valid_loss.cpu().numpy())
self.train_acc.append(100. * train_correct / train_total)
self.valid_acc.append(100. * valid_correct / valid_total)
# save the model if validation loss has decreased
if valid_loss <= self.valid_loss_min:
print(
f'Validation loss decreased ({self.valid_loss_min:.6f} --> {valid_loss:.6f}). Saving the model weights...')
early_stopping_count = 0
self.best_weights = self.model.state_dict()
self.valid_loss_min = valid_loss
else:
early_stopping_count += 1
if not early_stopping is None and early_stopping_count >= early_stopping:
break
self.model.load_state_dict(self.best_weights)
self.class_to_idx = image_datasets['train'].class_to_idx
self.start_epoch = epoch + 1
# Save Model Summary to a pandas DF
print('Saving the model training history ...')
history = {
'epoch': np.arange(1, self.start_epoch, 1),
'train_losses': self.train_losses,
'valid_losses': self.valid_losses,
'train_acc': self.train_acc,
'valid_acc': self.valid_acc,
}
self.history = | pd.DataFrame(history) | pandas.DataFrame |
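# Once `self.history` is built as above, the loss curves can be drawn straight
# from the DataFrame. Sketched with a hand-made history frame rather than a
# trained model; the numbers are placeholders.
import pandas as pd
import matplotlib.pyplot as plt

history = pd.DataFrame({
    "epoch": [1, 2, 3],
    "train_losses": [0.90, 0.62, 0.51],
    "valid_losses": [1.00, 0.71, 0.66],
})
ax = history.plot(x="epoch", y=["train_losses", "valid_losses"])
ax.set_xlabel("epoch")
ax.set_ylabel("loss")
plt.show()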
import os, pickle
import pandas as pd
import numpy as np
import seaborn as sns
import statistics
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import missingno as msno
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import sklearn
from sklearn.feature_selection import SelectPercentile, f_classif
from src.config import Config
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 500)
class Analysis(Config):
def __init__(self):
self.data = {}
def read_file(self, fname=None):
try:
if fname is None:
fname = os.path.join(Config.DATA["INPUT_PATH"])
print("Reading file: {} ...".format(fname))
data = pd.read_csv(fname)
for col in data.columns:
if len(data[col].unique()) < 20 or col in ["12", "64", "95", "target"]:
data[col] = data[col].astype("category")
print("Data import complete for file: {} ...".format(fname))
return data
except FileNotFoundError:
print(fname)
print("File {} is not found ... Please specify the correct path in config.py".format(fname))
def summary_statistics(self, data, dtype):
if dtype == "numerical":
df_stats_num = data.select_dtypes(["float", "int"]).describe()
kurtosis_list = []
skewness_list = []
numerical_column_list = [col for col in df_stats_num]
for col in df_stats_num:
kurtosis_list.append(data[col].kurtosis())
skewness_list.append(data[col].skew())
new_dict_kurtosis = dict(zip(numerical_column_list,kurtosis_list))
new_dict_skewness = dict(zip(numerical_column_list,skewness_list))
new_rows_kurtosis = pd.Series(data = new_dict_kurtosis, name='kurtosis')
new_rows_skewness = pd.Series(data = new_dict_skewness, name='skewness')
# Append the series of kurtosis and skewness to the .describe() dataframe
df_stats_num = df_stats_num.append(new_rows_kurtosis, ignore_index=False)
df_stats_num = df_stats_num.append(new_rows_skewness, ignore_index=False)
if (len(data) > 10):
df_stats_num = pd.DataFrame(df_stats_num.transpose())
# Set skewness and kurtosis type
df_stats_num.loc[df_stats_num['kurtosis'] < 3 , 'kurtosis type'] = 'Platykurtic' # thin tails
df_stats_num.loc[df_stats_num['kurtosis'] == 3 , 'kurtosis type'] = 'Normal - Mesokurtic'
df_stats_num.loc[df_stats_num['kurtosis'] > 3 , 'kurtosis type'] = 'Leptokurtic' # heavy tails
df_stats_num.loc[df_stats_num['skewness'] < 0, 'skewness type'] = 'Negatively Skewed'
df_stats_num.loc[df_stats_num['skewness'] == 0, 'skewness type'] = 'Symmetrical'
df_stats_num.loc[df_stats_num['skewness'] > 0, 'skewness type'] = 'Positively Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > -0.5) & (df_stats_num['skewness'] < 0.5), 'skewness lvl'] \
= 'Fairly Symmetrical'
df_stats_num.loc[(df_stats_num['skewness'] > -1.0) & (df_stats_num['skewness'] < -0.5) , 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 0.5) & (df_stats_num['skewness'] < 1.0), 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 1.0) | (df_stats_num['skewness'] < -1.0), 'skewness lvl'] \
= 'Highly Skewed'
final_df = df_stats_num
elif dtype == "categorical":
df_stats_cat = data.select_dtypes(["category"]).describe()
if (len(data) > 10):
df_stats_cat = pd.DataFrame(df_stats_cat.transpose())
final_df = df_stats_cat
return final_df
def categorical_barplot(self, data, col, xlabel, title, type="standard"):
fig, ax = plt.subplots(figsize=(15, 5))
if type == "standard":
try:
cat_index = np.unique(data[col], return_counts=True)[0]
cat_df = pd.DataFrame(np.unique(data[col], return_counts=True)[1], index=cat_index)
y = list(cat_df[0])
except:
cat_df = pd.DataFrame(data[col].value_counts())
y = cat_df.iloc[:,0]
x = list(cat_df.index)
elif type == "missing":
x = list(data[col].index)
y = list(data[col])
ax.bar(x, y, color=['grey', 'red', 'green', 'blue', 'cyan'])
for i in range(len(x)):
ax.text(i, y[i], y[i], ha = 'center')
ax.set_title(title, fontsize=14)
ax.set_xlabel(xlabel, fontsize=14)
ax.set_ylabel(col, fontsize=14)
return fig
def data_scaling(self, data):
X = data.loc[:, ~data.columns.isin(['target'])].values
y = data.loc[:,['target']].values
X = pd.DataFrame(StandardScaler().fit_transform(X))
normalized_data= pd.concat([X, pd.DataFrame(y)], axis=1)
return X
def boxplot(self, X, col, start_col, end_col):
if col == 0:
fig, ax = plt.subplots(figsize=(20,8))
sns.boxplot(x="variable", y="value", data= | pd.melt(X.iloc[:,:col+11]) | pandas.melt |
import pandas as pd
from tabula import wrapper
import numpy as np
import csv
import re
import os
path = 'statements/'
files = [f for f in os.listdir(path)]
files = filter(lambda f: f.endswith(('.pdf','.PDF')), files)
for i, file in enumerate(files):
filePath = path + file
print(filePath)
wrapper.convert_into(filePath, "output"+ str(i) +".csv", output_format="csv", pages='all')
names = ['Date', 'Transaction Description', 'test1', 'Amount', 'test2']
df1 = pd.read_csv("output"+ str(i) +".csv", names=names, header=None)
del df1['test1']
del df1['test2']
df1 = df1[ | pd.notnull(df1['Date']) | pandas.notnull |
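# pd.notnull above keeps only the rows whose Date value is present; dropna on
# that column gives the same result and reads a little more directly. Sketch
# with placeholder data:
import pandas as pd

df1 = pd.DataFrame({"Date": ["01/02/2020", None], "Amount": [12.5, 3.0]})
same_rows = df1[pd.notnull(df1["Date"])]
also_same = df1.dropna(subset=["Date"])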
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': [3]},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
data_ly9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset9.csv",skiprows=4)
data_ly10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset10.csv",skiprows=4)
data_ly11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset11.csv",skiprows=4)
data_ly12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset12.csv",skiprows=4)
data_ly13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset13.csv",skiprows=4)
data_ly14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset14.csv",skiprows=4)
data_ly15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset15.csv",skiprows=4)
frames_ly = [data_ly1,data_ly2,data_ly3,data_ly4,data_ly5,data_ly6,data_ly7,data_ly8,data_ly9,data_ly10,data_ly11,data_ly12,data_ly13,data_ly14,data_ly15]
result_ly = pd.concat(frames_ly)
result_ly.index=range(7200)
df_ly = pd.DataFrame({'label': [4]},index=range(0,7200))
data_ly=pd.concat([result_ly,df_ly],axis=1)
#-------------------------------------------------------------------------------------------------
data_sit1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset1.csv",skiprows=4)
data_sit2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset2.csv",skiprows=4)
data_sit3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset3.csv",skiprows=4)
data_sit4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset4.csv",skiprows=4)
data_sit5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset5.csv",skiprows=4)
data_sit6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset6.csv",skiprows=4)
data_sit7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset7.csv",skiprows=4)
data_sit8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset8.csv",skiprows=4)
data_sit9= | pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset9.csv",skiprows=4) | pandas.read_csv |
from unittest import TestCase
from nose_parameterized import parameterized
import os
import gzip
import pandas as pd
from pandas import read_csv
from pyfolio.utils import to_utc
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pyfolio.risk import (compute_style_factor_exposures,
compute_sector_exposures,
compute_cap_exposures,
compute_volume_exposures)
class RiskTestCase(TestCase):
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
test_pos = to_utc(read_csv(
gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
index_col=0, parse_dates=True))
test_pos.columns = [351, 1419, 1787, 25317, 3321, 3951, 4922, 'cash']
test_txn = to_utc(read_csv(
gzip.open(
__location__ + '/test_data/test_txn.csv.gz'),
index_col=0, parse_dates=True))
test_sectors = to_utc(read_csv(
__location__ + '/test_data/test_sectors.csv',
index_col=0, parse_dates=True))
expected_sectors_longed = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_longed.csv',
index_col=0, parse_dates=True))
expected_sectors_shorted = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_shorted.csv',
index_col=0, parse_dates=True))
expected_sectors_grossed = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_grossed.csv',
index_col=0, parse_dates=True))
test_caps = to_utc(read_csv(
__location__ + '/test_data/test_caps.csv',
index_col=0, parse_dates=True))
expected_caps_longed = to_utc(read_csv(
__location__ + '/test_data/expected_caps_longed.csv',
index_col=0, parse_dates=True))
expected_caps_shorted = to_utc(read_csv(
__location__ + '/test_data/expected_caps_shorted.csv',
index_col=0, parse_dates=True))
expected_caps_grossed = to_utc(read_csv(
__location__ + '/test_data/expected_caps_grossed.csv',
index_col=0, parse_dates=True))
expected_caps_netted = to_utc(read_csv(
__location__ + '/test_data/expected_caps_netted.csv',
index_col=0, parse_dates=True))
test_shares_held = to_utc(read_csv(
__location__ + '/test_data/test_shares_held.csv',
index_col=0, parse_dates=True))
test_volumes = to_utc(read_csv(
__location__ + '/test_data/test_volumes.csv',
index_col=0, parse_dates=True))
expected_volumes = to_utc(read_csv(
__location__ + '/test_data/expected_volumes.csv',
index_col=0, parse_dates=True))
test_dict = {}
styles = ['LT_MOMENTUM', 'LMCAP', 'VLTY', 'MACDSignal']
for style in styles:
df = to_utc(read_csv(
__location__ + '/test_data/test_{}.csv'.format(style),
index_col=0, parse_dates=True))
test_dict.update({style: df})
test_styles = pd.Panel()
test_styles = test_styles.from_dict(test_dict)
expected_styles = to_utc(read_csv(
__location__ + '/test_data/expected_styles.csv',
index_col=0, parse_dates=True))
@parameterized.expand([
(test_pos, test_styles, expected_styles)
])
def test_compute_style_factor_exposures(self, positions,
risk_factor_panel, expected):
style_list = []
for name, value in risk_factor_panel.iteritems():
risk_factor_panel[name].columns = \
risk_factor_panel[name].columns.astype(int)
style_list.append(
compute_style_factor_exposures(positions,
risk_factor_panel[name])
)
expected.columns = expected.columns.astype(int)
assert_frame_equal(pd.concat(style_list, axis=1), expected)
@parameterized.expand([
(test_pos, test_sectors, expected_sectors_longed,
expected_sectors_shorted, expected_sectors_grossed)
])
def test_compute_sector_exposures(self, positions, sectors,
expected_longed, expected_shorted,
expected_grossed):
sectors.columns = sectors.columns.astype(int)
sector_exposures = compute_sector_exposures(positions, sectors)
expected_longed.columns = expected_longed.columns.astype(int)
expected_shorted.columns = expected_shorted.columns.astype(int)
expected_grossed.columns = expected_grossed.columns.astype(int)
assert_frame_equal(pd.concat(sector_exposures[0], axis=1),
expected_longed)
assert_frame_equal(pd.concat(sector_exposures[1], axis=1),
expected_shorted)
assert_frame_equal(pd.concat(sector_exposures[2], axis=1),
expected_grossed)
@parameterized.expand([
(test_pos, test_caps, expected_caps_longed, expected_caps_shorted,
expected_caps_grossed, expected_caps_netted)
])
def test_compute_cap_exposures(self, positions, caps,
expected_longed, expected_shorted,
expected_grossed, expected_netted):
caps.columns = caps.columns.astype(int)
cap_exposures = compute_cap_exposures(positions, caps)
expected_longed.columns = expected_longed.columns.astype(int)
expected_shorted.columns = expected_shorted.columns.astype(int)
expected_grossed.columns = expected_grossed.columns.astype(int)
expected_netted.columns = expected_netted.columns.astype(int)
assert_frame_equal(pd.concat(cap_exposures[0], axis=1),
expected_longed)
assert_frame_equal(pd.concat(cap_exposures[1], axis=1),
expected_shorted)
assert_frame_equal(pd.concat(cap_exposures[2], axis=1),
expected_grossed)
assert_frame_equal( | pd.concat(cap_exposures[3], axis=1) | pandas.concat |
"""Summary
API end point to read input, delegate mining by creating strategies, and finally
formats patterns identified in a specific format
"""
import pandas as pd
from .utilities import print_fun
from .pruning import SupportPruning, UBPruning
from .mining import EnumeratedPattern
from .patterncount import SequenceMap
from .patternminer import PatternMiner
# Specific constants
dummy_col_name = 'dummy'
dummy_col_value = 'na'
ub_pruning = 'bi-dr'
supp_pruning = 'apriori'
support_output_metrics = ['crossk', 'support']
supported_output_types = ['threshold', 'topk']
def mine_sequence_patterns(series_df: pd.DataFrame, nc_window_col: str,
support_threshold: float, crossk_threshold: float,
pattern_length: int, confidence_threshold: float = -1,
lag: int = 0, invalid_seq_indexes: list = [],
output_metric: str = 'crossk',
output_type: str = 'topk',
output_threshold: float = -1, topk: int = 100,
pruning_type: str = 'apriori') -> pd.DataFrame:
"""Summary
Main function / interface for the package (Driver function)
"""
# Creating concrete strategy for counting patterns
message = 'Counting pattern occurences'
print_fun(message, status='step')
seqmap_inst = SequenceMap(series_df, nc_window_col)
seqmap_inst.init_seq_map(pattern_length)
# Instantiating instance for pattern enumeration
num_of_readings = len(series_df)
anomalous_windows = series_df.index[series_df[nc_window_col] == 1].tolist()
enum_patterns_inst = EnumeratedPattern(
anomalous_windows, num_of_readings, lag)
# Instantiate concrete strategy for pruning
num_of_dims = series_df.shape[1] - 1
if pruning_type == supp_pruning:
pruning_inst = SupportPruning(
num_of_dims, series_df, enum_patterns_inst, seqmap_inst, support_threshold)
else:
pruning_inst = UBPruning(num_of_dims, series_df, enum_patterns_inst,
seqmap_inst, support_threshold, crossk_threshold)
# Instantiate miner instance
message = 'Processing Anomalous Windows'
print_fun(message, status='step')
patternminer_inst = PatternMiner(
pattern_length, num_of_dims, invalid_seq_indexes, enum_patterns_inst, pruning_inst)
patternminer_inst.mine()
# Preparing final output
patterns_mined_df = pd.DataFrame()
col_names = series_df.columns[series_df.columns != nc_window_col].tolist()
patterns_mined_df = format_output(col_names, enum_patterns_inst,
pattern_length, output_metric, output_type,
k=topk, threshold=output_threshold)
return patterns_mined_df
def format_output(col_names: list, enum_pattern_inst: EnumeratedPattern,
pattern_length: int, metric: str, filter_type: str, k: int = -1,
threshold: float = -1) -> pd.DataFrame:
"""Summary
Retuns patterns and associated metrics as a dataframe
Args:
pattern_length (int): Fixed pattern length
metric (str): Type of metric to filter patterns upon
filter_type (str): filter_type of filter (topk vs threshold based)
k (int, optional): K if filter_type=topk
threshold (float, optional): threshold value if filter_type=threshold
"""
message = 'Formatting Enumerated Patterns ({0}) as Output via: ({1})'.format(
enum_pattern_inst.num_of_patterns, filter_type)
print_fun(message, status='step')
pattern_indexes = enum_pattern_inst.get_pattern_indexes(
metric, filter_type, k, threshold)
patterns_list = enum_pattern_inst.get_patterns(pattern_indexes)
pattern_metrics = enum_pattern_inst.get_pattern_metrics(pattern_indexes)
all_patterns_df = convert_patterns_to_df(
patterns_list, col_names, pattern_length)
# Join patterns and their respective metrics, side by side
return pd.concat([all_patterns_df, pattern_metrics], axis=1)
def convert_patterns_to_df(patterns_list: list, col_names: list,
pattern_length: int) -> pd.DataFrame:
"""Summary
Convert list of jsons into a dataframe of patterns
Args:
col_names (list): list of column names for attributes
patterns_list (list): list of all patterns enumerated, as jsons
pattern_length (int): Individual pattern length is constant
"""
all_patterns_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
df = pd.read_json('D:\IIIT Nagpur\IT Workshop1\pandas\data.json')
print(df.to_string())
#####################################
# Load a Python Dictionary into a DataFrame
import pandas as pd
data = {
"Duration":{
"0":60,
"1":60,
"2":60,
"3":45,
"4":45,
"5":60
},
"Pulse":{
"0":110,
"1":117,
"2":103,
"3":109,
"4":117,
"5":102
},
"Maxpulse":{
"0":130,
"1":145,
"2":135,
"3":175,
"4":148,
"5":127
},
"Calories":{
"0":409,
"1":479,
"2":340,
"3":282,
"4":406,
"5":300
}
}
df = | pd.DataFrame(data) | pandas.DataFrame |
'''
Integration tests for pipeline split behavior
'''
__author__ = '<NAME>'
import unittest
import pandas as pd
from abc import ABC, abstractmethod
from simpleml.tests.utils import assert_split_equal
from simpleml.datasets import SingleLabelPandasDataset, MultiLabelPandasDataset
from simpleml.datasets.dataset_splits import Split
from simpleml.pipelines import NoSplitPipeline, ExplicitSplitPipeline, RandomSplitPipeline
from simpleml.pipelines.projected_splits import ProjectedDatasetSplit
class _PipelineSplitTests(ABC):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
def setUp(self):
self.dataset = self.dataset_cls(**self.dataset_params)
self.dataset.dataframe = self.build_dataset()
self.pipeline = self.pipeline_cls(**self.pipeline_params)
self.pipeline.add_dataset(self.dataset)
@property
def dataset_params(self):
return {}
@property
def pipeline_params(self):
return {}
@abstractmethod
def expected_split_contents(self):
pass
@abstractmethod
def build_dataset(self):
pass
@abstractmethod
def example_split_name(self):
pass
def test_getting_splits_with_mutation(self):
'''
Mutate split and re-retrieve
No split behavior passes all the data for any split
'''
split = self.pipeline.get_dataset_split(split=self.example_split_name())
projected_split = split.projected_split
self.assertTrue(isinstance(split, ProjectedDatasetSplit))
self.assertTrue(isinstance(projected_split, Split))
assert_split_equal(projected_split, self.expected_split_contents())
assert_split_equal(split, self.expected_split_contents())
# mutate
projected_split['X'] = None
with self.assertRaises(AssertionError):
assert_split_equal(projected_split, self.expected_split_contents())
# assert equality
new_split = self.pipeline.get_dataset_split(split=self.example_split_name())
assert_split_equal(split, self.expected_split_contents())
assert_split_equal(new_split, self.expected_split_contents())
class NoSplitPipelineSingleLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = SingleLabelPandasDataset
pipeline_cls = NoSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return None
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 3, 'b': 4},
{'a': 4, 'b': 5},
{'a': 5, 'b': 6},
{'a': 6, 'b': 7}]),
y=pd.Series([3, 4, 5, 6, 7, 8], name='c'),
other=pd.Series([5, 6, 7, 8, 9, 10], name='e'))
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'e': 8}]),
pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'e': 10}]),
],
split_names=['first', 'second', 'third']
)
class ExplicitSplitPipelineSingleLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = SingleLabelPandasDataset
pipeline_cls = ExplicitSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return 'first'
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3}]),
y=pd.Series([3, 4], name='c'),
other=pd.Series([5, 6], name='e'))
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
| pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'e': 6}]) | pandas.DataFrame |
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* K-NN
Input to Proposed Solution:
---------------------------
* Directories of training and testing data in csv file format
* These two types of data should be stored in n x m pattern in csv file format.
Typical Example:
----------------
n x m samples in training csv file (Explain n and m)
k x s samples in testing csv file (Explain k and s
Output of Proposed Solution:
----------------------------
* Predictions generated by learning model for testing set
* They are stored in "results_team12.csv" file. (Change the name file if needed)
Code Owner:
-----------
* Copyright © Team 12. All rights reserved.
* Copyright © Istanbul Technical University, Learning From Data Spring/Fall 2020. All rights reserved.
"""
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.neighbors import NearestNeighbors
from scipy.stats.stats import pearsonr
import random as r
r.seed(1)
np.random.seed(1)
import warnings
warnings.filterwarnings('ignore')
def load_data(csv):
"""
The method reads train and test data from their dataset files.
Then, it splits train data into features and labels.
Parameters
----------
train_file: directory of the file in which train data set is located
test_file: directory of the file in which test data set is located
"""
# reading the data from the csv files
df = pd.read_csv(csv, sep=',')
# ignoring the index column of the data (0,...,149 or 0,...,79)
df = df.drop(columns=['ID'])
df_np = df.to_numpy()
return df_np
def train_model(train_t0, neighbourCount):
"""
The method creates a learning model and trains it by using training data.
Parameters
----------
train_t0: x
neighbourCount: number of neigbours in KNN
"""
nbrs = []
train_t0_single = np.transpose(train_t0)
for i in range(train_t0_single.shape[0]):
nbrs.append(NearestNeighbors(n_neighbors=neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1)))
return nbrs
def predict(train_t0, train_t1, test_t0, nbrs):
"""
The method makes predictions for testing data samples by using trained learning model.
Parameters
----------
train_t0: x
train_t1: y
test_t0: x_test
nbrs: Nearest Neigbors model for each feature
"""
train_t0_single = np.transpose(train_t0)
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
for i in range(train_t0_single.shape[0]):
distances, indices = nbrs[i].kneighbors(test_t0_single[i].reshape(-1,1))
distances = np.ones_like(distances)* 0.7 - distances
mul = np.multiply(distances, train_t1_single[i,indices])
pred = np.divide(np.mean(mul, axis =1), np.mean(distances, axis = 1))
prediction[:,i] = pred.reshape(-1)
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0
return prediction
def cv5(data_t0, data_t1, neighbourCount):
kf = KFold(n_splits=5 , shuffle = True, random_state=1)
prediction_all = np.zeros_like(data_t1)
mses= []
maes = []
pears = []
for trainIndex, testIndex in kf.split(data_t0):
train_t0, test_t0 = data_t0[trainIndex], data_t0[testIndex] #Split Data into train and test sets
train_t1, test_t1 = data_t1[trainIndex], data_t1[testIndex]
train_t0_single = np.transpose(train_t0) # Use features as rows and subjects as columns
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
preds = []
for i in range(train_t0_single.shape[0]): #Loop through each feature
nbrs = NearestNeighbors(n_neighbors= neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1))
distances, indices = nbrs.kneighbors(test_t0_single[i].reshape(-1,1))# Calculate the distances and indices of K closest neighbours of test subjects and train subjects in t0
distances = np.ones_like(distances)* 0.7 - distances # Set distances to (0.7 - d). Neighbours with low distance get larger values and vice versa
mul = np.multiply(distances, train_t1_single[i,indices]) # Use the changed distances as weights and multiply the corresponding t1 of the neighbours
pred = np.divide(np.mean(mul,axis =1),np.mean(distances, axis = 1)) #Take the mean of the weighted t1's and divide by the mean of distances to normalize
prediction[:,i] = pred.reshape(-1) #This is the prediction for this feature acroos all test subjects
preds.append(pred.reshape(-1))
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0 # Set nan locations to 0
preds = np.asarray(preds)
preds = np.transpose(preds)
mses.append( mean_squared_error(preds, test_t1) )
maes.append( mean_absolute_error(preds, test_t1) )
pears.append(pearsonr(preds.flatten(), test_t1.flatten())[0] )
prediction_all[testIndex] = prediction # Put all predictions for each CV fold into prediction_all
mse_error = mean_squared_error(data_t1, prediction_all)
mae_error = mean_absolute_error(data_t1, prediction_all)
print("mses: ", mses)
print("maes: ", maes)
print("pears", pears)
print("Average error of five fold cross validation MSE:", np.sum(mses) / 5)
print("Average error of five fold cross validation MAE:", np.sum(maes) / 5)
print("Average error of five fold cross validation pearson:", np.sum(pears) / 5)
print(" std of five fold cross validation MSE:", np.std(mses))
print(" std of five fold cross validation MAE:", np.std(maes))
print(" std of five fold cross validation pearson:", np.std(pears))
return mae_error, mse_error, prediction_all
def write_output(filename, predictions):
test_df = pd.DataFrame(predictions)
melted_df = test_df.to_numpy().flatten()
melted_df = | pd.DataFrame(data=melted_df, columns=['Predicted']) | pandas.DataFrame |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
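# Year ranges (inclusive) covered by each commodity's Mineral Yearbook workbook.
# usgs_myb_year() uses these ranges to map a requested year onto the matching
# "year_N" column of the spreadsheet.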
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Sets the column name ("year_N") based on the year. Checks that the year
    you picked is within the range covered by the data file.
    :param years: string, year range separated by a hyphen, e.g. "2014-2018"
    :param current_year_str: string, year of interest
    :return: string, column name of the form "year_N"
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
    :return: string, lower-case commodity name parsed from the source name
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
    :return: dict, FBA fields shared by all USGS_MYB sources
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
    :param value_string: string that may contain digits
    :return: string with all digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
    which is the base url for the data import. For the USGS MYB sources no
    year-specific substitution is needed, so this function does not parse the
    data and returns the url unchanged.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
    :param args: dictionary, arguments specified when running
        flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
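    # Rows such as "Imports for consumption:" act as section headers that set the
    # flow direction; the "Quantity" rows beneath them carry the reported values.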
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
""""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
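# Illustrative, non-executed sketch of how the call/parse pairs in this module chain together;
# the requests call and the source name below are assumptions for the example, not values
# defined in this file.
#
#     import requests
#     resp = requests.get(url)                            # url produced by the source's url helper
#     df = usgs_diatomite_call(resp=resp, year='2015')    # raw T1 sheet trimmed to the chosen year column
#     fba = usgs_diatomite_parse(df_list=[df], source='USGS_MYB_Diatomite', year='2015')
#     # fba holds one row per production/exports/imports flow, in flowbyactivity format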
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex()
df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex()
df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex()
if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if len(df_data_three.columns) == 9:
df_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
df_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
    if len(df_data_one.columns) == 13:
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
for col in df_data_three.columns:
if col not in col_to_use:
del df_data_three[col]
for col in df_data_four.columns:
if col not in col_to_use:
del df_data_four[col]
df_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# aluminum fluoride
# cryolite
df_data_two["type"] = "data_two"
df_data_three["type"] = "Aluminum Fluoride"
df_data_four["type"] = "Cryolite"
frames = [df_data_one, df_data_two, df_data_three, df_data_four]
else:
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_fluorspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Metallurgical", "Production"]
prod = ""
name = usgs_myb_name(source)
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "imports"
des = name
elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(df.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = df.iloc[index]["Production"].strip()
elif str(df.iloc[index]["type"]).strip() == \
"Aluminum Fluoride" or \
str(df.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = df.iloc[index]["type"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gallium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 11:
for x in range(11, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gallium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
if str(df.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_garnet_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 13:
for x in range(13, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_garnet_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption: 3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gold_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 13:
df_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gold_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for consumption, refined bullion"]
dataframe = pd.DataFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif df.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined bullion":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
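# The parsers in this module repeat the same sentinel handling: '--' is treated as a zero
# amount and 'W' (or NaN in some tables) as a withheld value. A hedged sketch of a shared
# helper that could express that mapping; the helper name is an assumption, not part of
# this module.
#
#     def _flow_amount(raw_value):
#         value = str(raw_value).strip()
#         if value == '--':
#             return str(0)
#         if value in ('W', 'nan'):
#             return WITHDRAWN_KEYWORD
#         return value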
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_graphite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantiy", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gypsum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_gypsum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iodine_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iodine_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for consumption", "Exports2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iron_ore_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iron_ore_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_kyanite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_kyanite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, all kyanite minerals:3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lead_url_helper(*, year, **_):
"""
    This helper function selects the url from which lead data is obtained,
    based on the data year. It does not parse the data, it only determines
    the url from which data is obtained.
    :param year: year of the data being sourced
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
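# Example (illustrative, not executed) of the year-based selection above: years before 2013
# resolve to the 2016 workbook, 2013 to the 2017 workbook, and 2014 onwards to the 2018
# advance-release file.
#
#     usgs_lead_url_helper(year='2012')   # -> ['...myb1-2016-lead.xls']
#     usgs_lead_url_helper(year='2013')   # -> ['...myb1-2017-lead.xls']
#     usgs_lead_url_helper(year='2016')   # -> ['...myb1-2018-lead-advrel.xlsx']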
def usgs_lead_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
else:
col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_lead_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for consumption, lead content:"]
dataframe = pd.DataFrame()
product = "production"
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() in import_export:
if df.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, lead content:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = df.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lime_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
    df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-27 11:14
# @Author : Mayandev
# @Site : https://github.com/Mayandev/
# @Software: PyCharm
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "morec.settings")  # NoQA
import django
django.setup()
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import TruncatedSVD
from movie.models import Movie
# Use SVD-based similarity to recommend movies: find the top-10 nearest movies and insert them into the database
path = '/Users/phillzou/code_workspace/flutter/django_morec/server/dataset/'
movies = pd.read_csv(path + 'movies.csv')
print('Number of movies (with title): %d' % movies[~pd.isnull(movies.title)].shape[0])
print('Number of movies (without title): %d' % movies[pd.isnull(movies.title)].shape[0])
print('Number of movies (total): %d' % movies.shape[0])
ratings = pd.read_csv(path + 'ratings.csv')
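# A hedged sketch of how the SVD/kNN recommendation described in the header comment might
# proceed; the pivot shape, component count and neighbour count below are assumptions for
# illustration, not the script's actual continuation.
#
#     movie_user = ratings.pivot(index='movieId', columns='userId', values='rating').fillna(0)
#     sparse_ratings = csr_matrix(movie_user.values)
#     svd = TruncatedSVD(n_components=20, random_state=42)
#     latent = svd.fit_transform(sparse_ratings)            # one latent vector per movie
#     knn = NearestNeighbors(n_neighbors=11, metric='cosine').fit(latent)
#     distances, indices = knn.kneighbors(latent)           # column 0 is the movie itself,
#                                                           # columns 1..10 are its top-10 neighbours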
import itertools
import json
import logging
import os
import traceback
import uuid
from copy import deepcopy
from typing import Union, List, Dict
import genet.auxiliary_files as auxiliary_files
import genet.exceptions as exceptions
import genet.modify.change_log as change_log
import genet.modify.graph as modify_graph
import genet.modify.schedule as modify_schedule
import genet.outputs_handler.geojson as geojson
import genet.outputs_handler.matsim_xml_writer as matsim_xml_writer
import genet.outputs_handler.sanitiser as sanitiser
import genet.schedule_elements as schedule_elements
import genet.utils.dict_support as dict_support
import genet.utils.graph_operations as graph_operations
import genet.utils.pandas_helpers as pd_helpers
import genet.utils.parallel as parallel
import genet.utils.persistence as persistence
import genet.utils.plot as plot
import genet.utils.simplification as simplification
import genet.utils.spatial as spatial
import genet.validate.network_validation as network_validation
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
from pyproj import Transformer
from s2sphere import CellId
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
class Network:
def __init__(self, epsg):
self.epsg = epsg
self.transformer = Transformer.from_crs(epsg, 'epsg:4326', always_xy=True)
self.graph = nx.MultiDiGraph(name='Network graph', crs=self.epsg, simplified=False)
self.schedule = schedule_elements.Schedule(epsg)
self.change_log = change_log.ChangeLog()
self.auxiliary_files = {'node': {}, 'link': {}}
# link_id_mapping maps between (usually string literal) index per edge to the from and to nodes that are
# connected by the edge
self.link_id_mapping = {}
def __repr__(self):
return f"<{self.__class__.__name__} instance at {id(self)}: with \ngraph: {nx.info(self.graph)} and " \
f"\nschedule {self.schedule.info()}"
def __str__(self):
return self.info()
def add(self, other):
"""
        This lets you add another genet.Network (`other`) to the network this method is called on.
This is deliberately not a magic function to discourage `new_network = network_1 + network_2` (and memory
goes out the window)
:param other:
:return:
"""
if self.is_simplified() != other.is_simplified():
raise RuntimeError('You cannot add simplified and non-simplified networks together')
# consolidate coordinate systems
if other.epsg != self.epsg:
logging.info(f'Attempting to merge two networks in different coordinate systems. '
f'Reprojecting from {other.epsg} to {self.epsg}')
            other.reproject(self.epsg)
# consolidate node ids
other = graph_operations.consolidate_node_indices(self, other)
# consolidate link ids
other = graph_operations.consolidate_link_indices(self, other)
# finally, once the node and link ids have been sorted, combine the graphs
# nx.compose(left, right) overwrites data in left with data in right under matching ids
self.graph = nx.compose(other.graph, self.graph)
# finally, combine link_id_mappings
self.link_id_mapping = {**other.link_id_mapping, **self.link_id_mapping}
# combine schedules
self.schedule.add(other.schedule)
# merge change_log DataFrames
self.change_log = self.change_log.merge_logs(other.change_log)
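    # Illustrative, non-executed usage of `add`: the second network is reprojected and its
    # node/link ids consolidated before the graphs, schedules and change logs are combined.
    # Population of the two networks is assumed to happen elsewhere.
    #
    #     n1 = Network(epsg='epsg:27700')
    #     n2 = Network(epsg='epsg:4326')
    #     ...  # populate both networks
    #     n1.add(n2)   # n1 now holds the nodes, links and schedule of both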
def print(self):
print(self.info())
def info(self):
return f"Graph info: {nx.info(self.graph)} \nSchedule info: {self.schedule.info()}"
def plot(self, output_dir='', data=False):
"""
Plots the network graph and schedule on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
if not self.schedule:
logging.warning('This Network does not have a PT schedule. Only the graph will be visualised.')
return self.plot_graph(output_dir=output_dir)
network_links = self.to_geodataframe()['links']
schedule_routes = self.schedule_network_routes_geodataframe()
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
schedule_routes = sanitiser._subset_plot_gdf(data, schedule_routes, base_keys={'route_id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links),
'schedule_routes': sanitiser.sanitise_geodataframe(schedule_routes)},
kepler_config='network_with_pt'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_with_pt_routes.html'))
return m
def plot_graph(self, output_dir='', data=False):
"""
Plots the network graph only on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
network_links = self.to_geodataframe()['links']
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links)},
kepler_config='network_with_pt'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_graph.html'))
return m
def plot_schedule(self, output_dir='', data=False):
"""
Plots original stop connections in the network's schedule over the network graph on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
network_links = self.to_geodataframe()['links']
schedule_gdf = self.schedule.to_geodataframe()
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
schedule_gdf['links'] = sanitiser._subset_plot_gdf(data, schedule_gdf['links'],
base_keys={'route_id', 'geometry'})
schedule_gdf['nodes'] = sanitiser._subset_plot_gdf(data, schedule_gdf['nodes'],
base_keys={'id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links),
'schedule_links': sanitiser.sanitise_geodataframe(schedule_gdf['links']),
'schedule_stops': sanitiser.sanitise_geodataframe(schedule_gdf['nodes'])},
kepler_config='network_and_schedule'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_and_schedule.html'))
return m
def reproject(self, new_epsg, processes=1):
"""
Changes projection of the network to new_epsg
:param new_epsg: 'epsg:1234'
:param processes: max number of process to split computation across
:return:
"""
# reproject nodes
nodes_attribs = dict(self.nodes())
new_nodes_attribs = parallel.multiprocess_wrap(
data=nodes_attribs, split=parallel.split_dict, apply=modify_graph.reproj, combine=parallel.combine_dict,
processes=processes, from_proj=self.epsg, to_proj=new_epsg)
self.apply_attributes_to_nodes(new_nodes_attribs)
# reproject geometries
gdf_geometries = gpd.GeoDataFrame(self.link_attribute_data_under_keys(['geometry']), crs=self.epsg)
gdf_geometries = gdf_geometries.to_crs(new_epsg)
new_link_attribs = gdf_geometries.T.to_dict()
self.apply_attributes_to_links(new_link_attribs)
if self.schedule:
self.schedule.reproject(new_epsg, processes)
self.initiate_crs_transformer(new_epsg)
self.graph.graph['crs'] = self.epsg
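    # Example (illustrative, not executed): reprojecting a populated network from British
    # National Grid to WGS84 updates node coordinates, link geometries and the schedule.
    #
    #     n.reproject('epsg:4326', processes=2)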
def initiate_crs_transformer(self, epsg):
self.epsg = epsg
if epsg != 'epsg:4326':
self.transformer = Transformer.from_crs(epsg, 'epsg:4326', always_xy=True)
else:
self.transformer = None
def simplify(self, no_processes=1):
if self.is_simplified():
raise RuntimeError('This network has already been simplified. You cannot simplify the graph twice.')
simplification.simplify_graph(self, no_processes)
# mark graph as having been simplified
self.graph.graph["simplified"] = True
def is_simplified(self):
return self.graph.graph["simplified"]
def node_attribute_summary(self, data=False):
"""
Parses through data stored on nodes and gives a summary tree of the data stored on the nodes.
If data is True, shows also up to 5 unique values stored under such keys.
:param data: bool, False by default
:return:
"""
root = graph_operations.get_attribute_schema(self.nodes(), data=data)
graph_operations.render_tree(root, data)
def node_attribute_data_under_key(self, key):
"""
Generates a pandas.Series object indexed by node ids, with data stored on the nodes under `key`
:param key: either a string e.g. 'x', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:return: pandas.Series
"""
data = graph_operations.get_attribute_data_under_key(self.nodes(), key)
return pd.Series(data, dtype=pd_helpers.get_pandas_dtype(data))
def node_attribute_data_under_keys(self, keys: Union[list, set], index_name=None):
"""
Generates a pandas.DataFrame object indexed by link ids, with data stored on the nodes under `key`
:param keys: list of either a string e.g. 'x', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:param index_name: optional, gives the index_name to dataframes index
:return: pandas.DataFrame
"""
return graph_operations.build_attribute_dataframe(self.nodes(), keys=keys, index_name=index_name)
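    # Example (illustrative) of the key formats the attribute-data methods accept;
    # 'osm:way:name' is an assumed nested attribute, not guaranteed to be present.
    #
    #     n.node_attribute_data_under_key('x')                       # -> pandas.Series
    #     n.node_attribute_data_under_keys(['x', 'y'])               # -> pandas.DataFrame
    #     n.link_attribute_data_under_key({'attributes': {'osm:way:name': 'text'}})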
def link_attribute_summary(self, data=False):
"""
Parses through data stored on links and gives a summary tree of the data stored on the links.
If data is True, shows also up to 5 unique values stored under such keys.
:param data: bool, False by default
:return:
"""
root = graph_operations.get_attribute_schema(self.links(), data=data)
graph_operations.render_tree(root, data)
def link_attribute_data_under_key(self, key: Union[str, dict]):
"""
Generates a pandas.Series object indexed by link ids, with data stored on the links under `key`
:param key: either a string e.g. 'modes', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:return: pandas.Series
"""
return pd.Series(graph_operations.get_attribute_data_under_key(self.links(), key))
def link_attribute_data_under_keys(self, keys: Union[list, set], index_name=None):
"""
Generates a pandas.DataFrame object indexed by link ids, with data stored on the links under `key`
:param keys: list of either a string e.g. 'modes', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:param index_name: optional, gives the index_name to dataframes index
:return: pandas.DataFrame
"""
return graph_operations.build_attribute_dataframe(self.links(), keys=keys, index_name=index_name)
def extract_nodes_on_node_attributes(self, conditions: Union[list, dict], how=any, mixed_dtypes=True):
"""
Extracts graph node IDs based on values of attributes saved on the nodes. Fails silently,
        assumes not all nodes have all of the attributes. Where the stored attribute is a list or set,
        as in a simplified network (which holds a mix of iterable and scalar values), the intersection
        of the stored values with the values satisfying the condition(s) is considered; if that
        intersection is not empty the condition is deemed satisfied. To disable this behaviour set
        mixed_dtypes to False.
:param conditions: {'attribute_key': 'target_value'} or nested
{'attribute_key': {'another_key': {'yet_another_key': 'target_value'}}}, where 'target_value' could be
- single value, string, int, float, where the edge_data[key] == value
(if mixed_dtypes==True and in case of set/list edge_data[key], value is in edge_data[key])
- list or set of single values as above, where edge_data[key] in [value1, value2]
(if mixed_dtypes==True and in case of set/list edge_data[key],
set(edge_data[key]) & set([value1, value2]) is non-empty)
- for int or float values, two-tuple bound (lower_bound, upper_bound) where
lower_bound <= edge_data[key] <= upper_bound
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] satisfies lower_bound <= item <= upper_bound)
- function that returns a boolean given the value e.g.
def below_exclusive_upper_bound(value):
return value < 100
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] returns True after applying function)
:param how : {all, any}, default any
The level of rigour used to match conditions
* all: means all conditions need to be met
* any: means at least one condition needs to be met
:param mixed_dtypes: True by default, used if values under dictionary keys queried are single values or lists of
values e.g. as in simplified networks.
:return: list of node ids in the network satisfying conditions
"""
return graph_operations.extract_on_attributes(
self.nodes(), conditions=conditions, how=how, mixed_dtypes=mixed_dtypes)
def extract_links_on_edge_attributes(self, conditions: Union[list, dict], how=any, mixed_dtypes=True):
"""
Extracts graph link IDs based on values of attributes saved on the edges. Fails silently,
        assumes not all links have those attributes. Where the stored attribute is a list or set,
        as in a simplified network (which holds a mix of iterable and scalar values), the intersection
        of the stored values with the values satisfying the condition(s) is considered; if that
        intersection is not empty the condition is deemed satisfied. To disable this behaviour set
        mixed_dtypes to False.
:param conditions: {'attribute_key': 'target_value'} or nested
{'attribute_key': {'another_key': {'yet_another_key': 'target_value'}}}, where 'target_value' could be
- single value, string, int, float, where the edge_data[key] == value
(if mixed_dtypes==True and in case of set/list edge_data[key], value is in edge_data[key])
- list or set of single values as above, where edge_data[key] in [value1, value2]
(if mixed_dtypes==True and in case of set/list edge_data[key],
set(edge_data[key]) & set([value1, value2]) is non-empty)
- for int or float values, two-tuple bound (lower_bound, upper_bound) where
lower_bound <= edge_data[key] <= upper_bound
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] satisfies lower_bound <= item <= upper_bound)
- function that returns a boolean given the value e.g.
def below_exclusive_upper_bound(value):
return value < 100
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] returns True after applying function)
:param how : {all, any}, default any
The level of rigour used to match conditions
* all: means all conditions need to be met
* any: means at least one condition needs to be met
:param mixed_dtypes: True by default, used if values under dictionary keys queried are single values or lists of
values e.g. as in simplified networks.
:return: list of link ids in the network satisfying conditions
"""
return graph_operations.extract_on_attributes(
self.links(), conditions=conditions, how=how, mixed_dtypes=mixed_dtypes)
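    # Illustrative, non-executed condition examples for the extraction methods above:
    #
    #     n.extract_links_on_edge_attributes(conditions={'freespeed': (20, 40)})     # bound condition
    #     n.extract_links_on_edge_attributes(conditions={'modes': ['car', 'bus']})   # list of values
    #     n.extract_nodes_on_node_attributes(
    #         conditions=[{'x': lambda v: v > 0}, {'y': lambda v: v > 0}], how=all)  # callables, all must hold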
def links_on_modal_condition(self, modes: Union[str, list]):
"""
Finds link IDs with modes or singular mode given in `modes`
:param modes: string mode e.g. 'car' or a list of such modes e.g. ['car', 'walk']
:return: list of link IDs
"""
return self.extract_links_on_edge_attributes(conditions={'modes': modes}, mixed_dtypes=True)
def nodes_on_modal_condition(self, modes: Union[str, list]):
"""
Finds node IDs with modes or singular mode given in `modes`
:param modes: string mode e.g. 'car' or a list of such modes e.g. ['car', 'walk']
        :return: list of node IDs
"""
links = self.links_on_modal_condition(modes)
nodes = {self.link(link)['from'] for link in links} | {self.link(link)['to'] for link in links}
return list(nodes)
def modal_subgraph(self, modes: Union[str, set, list]):
return self.subgraph_on_link_conditions(conditions={'modes': modes}, mixed_dtypes=True)
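    # Example (illustrative): extracting the car-only parts of the network.
    #
    #     car_links = n.links_on_modal_condition('car')     # link ids whose modes include 'car'
    #     car_nodes = n.nodes_on_modal_condition(['car'])   # nodes touched by those links
    #     car_graph = n.modal_subgraph('car')               # subgraph restricted to those links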
def nodes_on_spatial_condition(self, region_input):
"""
Returns node IDs which intersect region_input
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:return: node IDs
"""
if not isinstance(region_input, str):
# assumed to be a shapely.geometry input
gdf = self.to_geodataframe()['nodes'].to_crs("epsg:4326")
return self._find_ids_on_shapely_geometry(gdf, how='intersect', shapely_input=region_input)
elif persistence.is_geojson(region_input):
gdf = self.to_geodataframe()['nodes'].to_crs("epsg:4326")
return self._find_ids_on_geojson(gdf, how='intersect', geojson_input=region_input)
else:
# is assumed to be hex
return self._find_node_ids_on_s2_geometry(region_input)
def links_on_spatial_condition(self, region_input, how='intersect'):
"""
Returns link IDs which intersect region_input
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:param how:
            - 'intersect' default, will return IDs of the links whose geometry intersects the
            region_input
            - 'within' will return IDs of the links whose geometry is contained within the region_input
:return: link IDs
"""
gdf = self.to_geodataframe()['links'].to_crs("epsg:4326")
if not isinstance(region_input, str):
# assumed to be a shapely.geometry input
return self._find_ids_on_shapely_geometry(gdf, how, region_input)
elif persistence.is_geojson(region_input):
return self._find_ids_on_geojson(gdf, how, region_input)
else:
# is assumed to be hex
return self._find_link_ids_on_s2_geometry(gdf, how, region_input)
def subnetwork(self, links: Union[list, set], services: Union[list, set] = None,
strongly_connected_modes: Union[list, set] = None, n_connected_components: int = 1):
"""
Subset a Network object using a collection of link IDs and (optionally) service IDs
:param links: Link IDs to be retained in the new Network
:param services: optional, collection of service IDs in the Schedule for subsetting.
:param strongly_connected_modes: modes in the network that need to be strongly connected. For MATSim those
are modes that agents are allowed to route on. Defaults to {'car', 'walk', 'bike'}
:param n_connected_components: number of expected strongly connected components for
`the strongly_connected_modes`. Defaults to 1, as that is what MATSim expects. Other number may be used
if disconnected islands are expected, and then connected up using the `connect_components` method.
:return: A new Network object that is a subset of the original
"""
logging.info('Subsetting a Network will likely result in a disconnected network graph. A cleaner will be ran '
'that will remove links to make the resulting Network strongly connected for modes: '
'car, walk, bike.')
subnetwork = Network(epsg=self.epsg)
links = set(links)
if self.schedule:
if services:
logging.info(
f'Schedule will be subsetted using given services: {services}. Links pertaining to their '
'network routes will also be retained.')
subschedule = self.schedule.subschedule(services)
routes = subschedule.route_attribute_data(keys=['route'])
links = links | set(np.concatenate(routes['route'].values))
subnetwork.schedule = subschedule
subnetwork.graph = self.subgraph_on_link_conditions(conditions={'id': links})
subnetwork.link_id_mapping = {k: v for k, v in self.link_id_mapping.items() if k in links}
if strongly_connected_modes is None:
            logging.info("Param: strongly_connected_modes is defaulting to `{'car', 'walk', 'bike'}`. "
"You can change this behaviour by passing the parameter.")
strongly_connected_modes = {'car', 'walk', 'bike'}
for mode in strongly_connected_modes:
if not subnetwork.is_strongly_connected(modes=mode):
logging.warning(f'The graph for mode {mode} is not strongly connected. '
f'The largest {n_connected_components} connected components will be extracted.')
if n_connected_components > 1:
logging.info('Number of requested connected components is larger than 1. Consider using '
'`connect_components` method to create modal graphs that are strongly connected.')
subnetwork.retain_n_connected_subgraphs(n=n_connected_components, mode=mode)
# TODO Inherit and subset Auxiliary files
logging.info('Subsetted Network is ready - do not forget to validate and visualise your subset!')
return subnetwork
def subnetwork_on_spatial_condition(self, region_input, how='intersect',
strongly_connected_modes: Union[list, set] = None,
n_connected_components: int = 1):
"""
Subset a Network object using a spatial bound
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:param how:
- 'intersect' default, will return IDs of the Services whose at least one Stop intersects the
region_input
- 'within' will return IDs of the Services whose all of the Stops are contained within the region_input
:param strongly_connected_modes: modes in the network that need to be strongly connected. For MATSim those
are modes that agents are allowed to route on. Defaults to {'car', 'walk', 'bike'}
:param n_connected_components: number of expected strongly connected components for
`the strongly_connected_modes`. Defaults to 1, as that is what MATSim expects. Other number may be used
if disconnected islands are expected, and then connected up using the `connect_components` method.
:return: A new Network object that is a subset of the original
"""
if self.schedule:
services_to_keep = self.schedule.services_on_spatial_condition(region_input=region_input, how=how)
else:
services_to_keep = None
subset_links = set(self.links_on_spatial_condition(region_input=region_input, how=how))
return self.subnetwork(links=subset_links, services=services_to_keep,
strongly_connected_modes=strongly_connected_modes,
n_connected_components=n_connected_components)
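    # Illustrative, non-executed spatial subsetting; the geojson path is an assumption.
    #
    #     sub = n.subnetwork_on_spatial_condition('boundaries/city_centre.geojson', how='intersect')
    #     sub.plot()   # validate and visualise the subset, as advised in the log message above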
def remove_mode_from_links(self, links: Union[set, list], mode: Union[set, list, str]):
"""
Method to remove modes from links. Deletes links which have no mode left after the process.
:param links: collection of link IDs to remove the mode from
:param mode: which mode to remove
:return: updates graph
"""
def empty_modes(mode_attrib):
if not mode_attrib:
return True
return False
links = self._setify(links)
mode = self._setify(mode)
df = self.link_attribute_data_under_keys(['modes'])
extra = links - set(df.index)
if extra:
logging.warning(f'The following links are not present: {extra}')
df['modes'] = df['modes'].apply(lambda x: self._setify(x))
df = df.loc[links & set(df.index)][df['modes'].apply(lambda x: bool(mode & x))]
df['modes'] = df['modes'].apply(lambda x: x - mode)
self.apply_attributes_to_links(df.T.to_dict())
# remove links without modes
no_mode_links = graph_operations.extract_on_attributes(
self.links(),
{'modes': empty_modes},
mixed_dtypes=False
)
self.remove_links(no_mode_links)
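    # Example (illustrative): stripping the 'bike' mode from two links; any link left with an
    # empty mode set is removed from the graph entirely. The link ids are assumptions.
    #
    #     n.remove_mode_from_links({'link_1', 'link_2'}, 'bike')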
def retain_n_connected_subgraphs(self, n: int, mode: str):
"""
Method to remove modes from link which do not belong to largest connected n components. Deletes links which
have no mode left after the process.
:param n: number of components to retain
:param mode: which mode to consider
:return: updates graph
"""
modal_subgraph = self.modal_subgraph(mode)
# calculate how many connected subgraphs there are
connected_components = network_validation.find_connected_subgraphs(modal_subgraph)
connected_components_nodes = []
for i in range(0, n):
connected_components_nodes += connected_components[i][0]
connected_subgraphs_to_extract = modal_subgraph.subgraph(connected_components_nodes).copy().edges.data('id')
diff_links = set([e[2] for e in modal_subgraph.edges.data('id')]) - set(
[e[2] for e in connected_subgraphs_to_extract])
logging.info(f'Extracting largest connected components resulted in mode: {mode} being deleted from '
f'{len(diff_links)} edges')
self.remove_mode_from_links(diff_links, mode)
def _find_ids_on_geojson(self, gdf, how, geojson_input):
shapely_input = spatial.read_geojson_to_shapely(geojson_input)
return self._find_ids_on_shapely_geometry(gdf=gdf, how=how, shapely_input=shapely_input)
def _find_ids_on_shapely_geometry(self, gdf, how, shapely_input):
if how == 'intersect':
return list(gdf[gdf.intersects(shapely_input)]['id'])
if how == 'within':
return list(gdf[gdf.within(shapely_input)]['id'])
else:
            raise NotImplementedError('Only `intersect` and `within` options for `how` param.')
def _find_node_ids_on_s2_geometry(self, s2_input):
cell_union = spatial.s2_hex_to_cell_union(s2_input)
return [_id for _id, s2_id in self.graph.nodes(data='s2_id') if cell_union.intersects(CellId(s2_id))]
def _find_link_ids_on_s2_geometry(self, gdf, how, s2_input):
gdf['geometry'] = gdf['geometry'].apply(lambda x: spatial.swap_x_y_in_linestring(x))
gdf['s2_geometry'] = gdf['geometry'].apply(lambda x: spatial.generate_s2_geometry(x))
gdf = gdf.set_index('id')
links = gdf['s2_geometry'].T.to_dict()
cell_union = spatial.s2_hex_to_cell_union(s2_input)
if how == 'intersect':
return [_id for _id, s2_geom in links.items() if
any([cell_union.intersects(CellId(s2_id)) for s2_id in s2_geom])]
elif how == 'within':
return [_id for _id, s2_geom in links.items() if
all([cell_union.intersects(CellId(s2_id)) for s2_id in s2_geom])]
else:
raise NotImplementedError('Only `intersect` and `within` options for `how` param.')
def add_node(self, node: Union[str, int], attribs: dict = None, silent: bool = False):
"""
Adds a node.
:param node:
        :param attribs: should include spatial information x,y in epsg consistent with the network or lat lon in
epsg:4326
:param silent: whether to mute stdout logging messages
:return:
"""
if attribs is not None:
self.graph.add_node(node, **attribs)
else:
self.graph.add_node(node)
self.change_log.add(object_type='node', object_id=node, object_attributes=attribs)
if not silent:
logging.info(f'Added Node with index `{node}` and data={attribs}')
return node
def add_nodes(self, nodes_and_attribs: dict, silent: bool = False, ignore_change_log: bool = False):
"""
Adds nodes, reindexes if indices are clashing with nodes already in the network
:param nodes_and_attribs: {index_for_node: {attribute dictionary for that node}}
:param silent: whether to mute stdout logging messages
:param ignore_change_log: whether to ignore logging changes to the network in the changelog. False by default
and not recommended. Only used when an alternative changelog event is being produced (e.g. simplification) to
reduce changelog bloat.
:return:
"""
# check for clashing nodes
clashing_node_ids = set(dict(self.nodes()).keys()) & set(nodes_and_attribs.keys())
df_nodes = pd.DataFrame(nodes_and_attribs).T
reindexing_dict = {}
if df_nodes.empty:
df_nodes = pd.DataFrame({'id': list(nodes_and_attribs.keys())})
elif ('id' not in df_nodes.columns) or (df_nodes['id'].isnull().any()):
df_nodes['id'] = df_nodes.index
if clashing_node_ids:
reindexing_dict = dict(
zip(clashing_node_ids, self.generate_indices_for_n_nodes(
len(nodes_and_attribs), avoid_keys=set(nodes_and_attribs.keys()))))
clashing_mask = df_nodes['id'].isin(reindexing_dict.keys())
df_nodes.loc[clashing_mask, 'id'] = df_nodes.loc[clashing_mask, 'id'].map(reindexing_dict)
df_nodes = df_nodes.set_index('id', drop=False)
nodes_and_attribs_to_add = df_nodes.T.to_dict()
self.graph.add_nodes_from([(node_id, attribs) for node_id, attribs in nodes_and_attribs_to_add.items()])
if not ignore_change_log:
self.change_log = self.change_log.add_bunch(object_type='node',
id_bunch=list(nodes_and_attribs_to_add.keys()),
attributes_bunch=list(nodes_and_attribs_to_add.values()))
if not silent:
logging.info(f'Added {len(nodes_and_attribs)} nodes')
return reindexing_dict, nodes_and_attribs_to_add
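    # Hedged usage sketch (not part of the original source): adding two hypothetical
    # nodes; if 'A' already exists in the graph it is reindexed, and the returned
    # `reindexing_dict` records the new id it received.
    #
    #     reindexing_dict, added = n.add_nodes({
    #         'A': {'x': 1.0, 'y': 2.0},
    #         'B': {'x': 3.0, 'y': 4.0},
    #     })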
def add_edge(self, u: Union[str, int], v: Union[str, int], multi_edge_idx: int = None, attribs: dict = None,
silent: bool = False):
"""
Adds an edge between u and v. If an edge between u and v already exists, adds an additional one. Generates
        link id. If you already have a link id, use the add_link method instead.
:param u: node in the graph
:param v: node in the graph
:param multi_edge_idx: you can specify which multi index to use if there are other edges between u and v.
Will generate new index if already used.
:param attribs:
:param silent: whether to mute stdout logging messages
:return:
"""
link_id = self.generate_index_for_edge(silent=silent)
self.add_link(link_id, u, v, multi_edge_idx, attribs, silent)
if not silent:
logging.info(f'Added edge from `{u}` to `{v}` with link_id `{link_id}`')
return link_id
def add_edges(self, edges_attributes: List[dict], silent: bool = False, ignore_change_log: bool = False):
"""
Adds multiple edges, generates their unique link ids
:param edges_attributes: List of edges, each item in list is a dictionary defining the edge attributes,
contains at least 'from': node_id and 'to': node_id entries,
:param silent: whether to mute stdout logging messages
:param ignore_change_log: whether to ignore logging changes to the network in the changelog. False by default
and not recommended. Only used when an alternative changelog event is being produced (e.g. simplification) to
reduce changelog bloat.
:return:
"""
# check for compulsory attribs
df_edges = pd.DataFrame(edges_attributes)
if ('from' not in df_edges.columns) or (df_edges['from'].isnull().any()):
raise RuntimeError('You are trying to add edges which are missing `from` (origin) nodes')
if ('to' not in df_edges.columns) or (df_edges['to'].isnull().any()):
raise RuntimeError('You are trying to add edges which are missing `to` (destination) nodes')
df_edges['id'] = list(self.generate_indices_for_n_edges(len(df_edges)))
df_edges = df_edges.set_index('id', drop=False)
return self.add_links(df_edges.T.to_dict(), silent=silent, ignore_change_log=ignore_change_log)
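    # Hedged usage sketch (not part of the original source): adding two edges between
    # hypothetical nodes; unlike `add_links`, the link ids are generated automatically.
    #
    #     n.add_edges([
    #         {'from': 'A', 'to': 'B', 'modes': ['car']},
    #         {'from': 'B', 'to': 'A', 'modes': ['car']},
    #     ])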
def add_link(self, link_id: Union[str, int], u: Union[str, int], v: Union[str, int], multi_edge_idx: int = None,
attribs: dict = None, silent: bool = False):
"""
        Adds a link between u and v with id link_id, if available. If a link between u and v already exists,
adds an additional one.
:param link_id:
:param u: node in the graph
:param v: node in the graph
:param multi_edge_idx: you can specify which multi index to use if there are other edges between u and v.
Will generate new index if already used.
:param attribs:
:param silent: whether to mute stdout logging messages
:return:
"""
if link_id in self.link_id_mapping:
new_link_id = self.generate_index_for_edge(silent=silent)
logging.warning(f'`{link_id}` already exists. Generated a new unique_index: `{new_link_id}`')
link_id = new_link_id
if multi_edge_idx is None:
multi_edge_idx = self.graph.new_edge_key(u, v)
if self.graph.has_edge(u, v, multi_edge_idx):
old_idx = multi_edge_idx
multi_edge_idx = self.graph.new_edge_key(u, v)
logging.warning(f'Changing passed multi_edge_idx: `{old_idx}` as there already exists an edge stored under'
f' that index. New multi_edge_idx: `{multi_edge_idx}`')
if not isinstance(multi_edge_idx, int):
raise RuntimeError('Multi index key needs to be an integer')
self.link_id_mapping[link_id] = {'from': u, 'to': v, 'multi_edge_idx': multi_edge_idx}
compulsory_attribs = {'from': u, 'to': v, 'id': link_id}
if attribs is None:
attribs = compulsory_attribs
else:
attribs = {**attribs, **compulsory_attribs}
self.graph.add_edge(u, v, key=multi_edge_idx, **attribs)
self.change_log.add(object_type='link', object_id=link_id, object_attributes=attribs)
if not silent:
logging.info(f'Added Link with index {link_id}, from node:{u} to node:{v}, under '
f'multi-index:{multi_edge_idx}, and data={attribs}')
return link_id
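    # Hedged usage sketch (not part of the original source): adding a link with an
    # explicit id between hypothetical nodes 'A' and 'B'; if multi_edge_idx 0 is already
    # taken for that node pair, a fresh index is generated as warned above.
    #
    #     n.add_link('link_AB', u='A', v='B', multi_edge_idx=0,
    #                attribs={'modes': ['car'], 'length': 100})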
def add_links(self, links_and_attributes: Dict[str, dict], silent: bool = False, ignore_change_log: bool = False):
"""
        Adds multiple links, keyed by their link ids
:param links_and_attributes: Dictionary of link ids and corresponding edge attributes, each edge attributes
contains at least 'from': node_id and 'to': node_id entries,
:param silent: whether to mute stdout logging messages
:param ignore_change_log: whether to ignore logging changes to the network in the changelog. False by default
and not recommended. Only used when an alternative changelog event is being produced (e.g. simplification) to
reduce changelog bloat.
:return:
"""
# check for compulsory attribs
df_links = pd.DataFrame(links_and_attributes).T
if ('from' not in df_links.columns) or (df_links['from'].isnull().any()):
raise RuntimeError('You are trying to add links which are missing `from` (origin) nodes')
if ('to' not in df_links.columns) or (df_links['to'].isnull().any()):
raise RuntimeError('You are trying to add links which are missing `to` (destination) nodes')
if ('id' not in df_links.columns) or (df_links['id'].isnull().any()):
df_links['id'] = df_links.index
# generate initial multi_edge_idxes for the links to be added
if 'multi_edge_idx' not in df_links.columns:
df_links['multi_edge_idx'] = 0
while df_links[['from', 'to', 'multi_edge_idx']].duplicated().any():
df_links.loc[df_links[['from', 'to', 'multi_edge_idx']].duplicated(), 'multi_edge_idx'] += 1
        df_link_id_mapping = pd.DataFrame(self.link_id_mapping)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 11:31:01 2019
@author: npittman
"""
import requests
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
from bs4 import BeautifulSoup
import multiprocessing
import sys
import pandas as pd
from tarfile import TarFile
import gzip
import subprocess
import calendar
import shutil
import glob
import lxml
def downloader(urls,mooring=None, meta=False):
"""" Function designed to download TaoTriton data from: """
path='datasets/tao/tao_co2/pieces/downloaded/'
meta_path=path+'meta/'
if meta==True:
path=meta_path
def exists(fileloc):
if str(fileloc[-4:])=='.pdf':
try:
open(fileloc)
return True
except:
return False
#print(fileloc)
try:
pd.read_csv(fileloc,header=3)
return True
except:
return False
if not os.path.isdir(path):
print('Creating directory: ',path)
os.makedirs(path)
if not os.path.isdir(meta_path):
print('Creating directory: ',meta_path)
os.makedirs(meta_path)
if mooring != None:
for moor in mooring:
if not os.path.isdir(path+moor):
print('Creating directory: ',path+moor)
os.makedirs(path+moor)
file_locations=[]
for url in urls:
while True:
#Download the files to their file name in the directory we just created.
#ie: seawifs_data/S2000001.L3m_DAY_RRS_Rrs_443_9km.nc
try:
if mooring ==None:
fileloc=path+url.split('/')[-1]
else:
if str(url.split('/')[-1][7])!='_':
if str(url.split('/')[-1][3])!='_':
moorpath=url.split('/')[-1][0:7]+'_'+url.split('/')[-1][7:9]
fileloc=path+moorpath+'/'+url.split('/')[-1]
else:
moorpath=url.split('/')[-1][0:3]+url.split('/')[-1][4:11]
fileloc=path+moorpath+'/'+url.split('/')[-1]
else:
moorpath=url.split('/')[-1][0:10]
fileloc=path+moorpath+'/'+url.split('/')[-1]
except:
# print('something broke at:',url)
continue
if fileloc[-3:]=='txt':
fileloc=fileloc[0:-3]+'csv'
print(url)
if exists(fileloc):
print('Exists: ',fileloc)
file_locations.append(fileloc)
break
r = requests.get(url)#,timeout=s20)
with open(fileloc, 'wb') as f:
f.write(r.content)
#Ensure that the file actually downloaded, this can fail sometimes for some reason, maybe too soon.
#time.sleep(1) #Can fail sometimes so maybe this is a fix
if (r.status_code==200) & (exists(fileloc)==True):
print('Downloaded: ',fileloc)
file_locations.append(fileloc)
break
else:
print('Download failed:', fileloc,'status:',r.status_code)
return file_locations
def gather_links(url_list):
urls=[]
metadata_urls=[]
for url in url_list: #Loop through the different wavelengths
print("Gathering URLS for TaoTRITON")
print(url)
r = requests.get(url)
soup = BeautifulSoup(r.content,features="lxml")
for tag in soup.find_all('td'): #Loop through years
tags=str(tag).split('"')
try:
this_tag=tags[1] #Get a year's URL
if (str(this_tag[-4:])=='.csv') or (str(this_tag[-4:])=='.txt'):
print('DATA: '+this_tag)
urls.append(url+this_tag)
elif str(this_tag[-8:])=='Meta.pdf':
print('META: '+this_tag)
metadata_urls.append(url+this_tag)
except:
pass
return urls,metadata_urls
url_list=['https://www.nodc.noaa.gov/archive/arc0061/0113238/5.5/data/0-data/', #TAO165E_0N
'https://www.nodc.noaa.gov/archive/arc0063/0117073/4.4/data/0-data/', #TAO165E_8S
'https://www.nodc.noaa.gov/archive/arc0051/0100078/8.8/data/0-data/', #TAO170W_0N
'https://www.nodc.noaa.gov/archive/arc0051/0100084/6.6/data/0-data/', #TAO155W_0N
'https://www.nodc.noaa.gov/archive/arc0051/0100077/7.7/data/0-data/', #TAO140W_0N
'https://www.nodc.noaa.gov/archive/arc0051/0100076/8.8/data/0-data/', #TAO125W_0N
'https://www.nodc.noaa.gov/archive/arc0061/0112885/6.6/data/0-data/', #TAO110W_0N
'https://www.nodc.noaa.gov/archive/arc0051/0100079/1.1/data/0-data/TAO170W_2S_Aug07_Aug08/', #TAO170W_2S_Aug07_Aug08 1
'https://www.nodc.noaa.gov/archive/arc0051/0100079/1.1/data/0-data/TAO170W_2S_Jun98_Nov04/'] #TAO170W_2S_Jun98_Nov04 2
mooring=['TAO165E_0N','TAO165E_8S','TAO170W_0N','TAO155W_0N','TAO140W_0N','TAO125W_0N','TAO110W_0N','TAO170W_2N','TAO170W_2S']
urls,metadata=gather_links(url_list) #Can comment out once complete.
downloader(urls,mooring) #Can comment out once complete.
downloader(metadata,meta=True) #Can comment out once complete.
data=[]
#Clean up and save into the pieces folder.
for x, folder in enumerate(glob.glob('datasets/tao/tao_co2/pieces/downloaded/T*')):
print('\n'+folder)
files=glob.glob(folder+'/*')
indexes=[]
data.append([])
#Remove files including LOG or QC from our list of files.
for file in files:
if 'Log' in file:
index=files.index(file)
indexes.append(index)
for i in reversed(indexes):
files.pop(i)
for floc in files:
#print(floc)
check_head=0
while True:
#print(check_head)
try:
                dat = pd.read_csv(floc, header=check_head)
#Here will be our strategy to handle gps_height:
# In fit() just save the input data as a data frame \
# with gps_height, lat, long, and gps_height > 0
# In transform(), if gps_height == 0, then
# start at 0.1 radius, and check if there are any non-zero gps_instances.
# If yes, get the average, else, increment search radius
# by 0.3 (0.1 increase corresponds to 11km approximately)
# If nothing is found within an increment of 2, then just ignore.
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
import pandas as pd
import math
class GPSHeightImputer(BaseEstimator, TransformerMixin):
def __init__(self,init_radius=0.1,increment_radius=0.3,method = 'custom'):
self.column_names = []
self.init_radius = init_radius
self.increment_radius = increment_radius
self.method = method
def __get_subset_records(self, latitude, longitude, df, radius):
latitude_from = latitude - radius
latitude_to = latitude + radius
longitude_from = longitude - radius
longitude_to = longitude + radius
df_temp = df[(df['latitude'] >= latitude_from) & (df['latitude'] <= latitude_to) &
(df['longitude'] >= longitude_from) & (df['longitude'] <= longitude_to)]
return df_temp
def fit(self, X, y=None):
if self.method == 'custom':
X['gps_height'] = X['gps_height'].astype(float)
X['latitude'] = X['latitude'].astype(float)
X['longitude'] = X['longitude'].astype(float)
self.df = X[X['gps_height'] != 0]
elif self.method == 'median':
X['gps_height'] = X['gps_height'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.median = np.median(list(X[X['gps_height'] != 0]['gps_height']))
if math.isnan(self.median):
self.median = 0
elif self.method == 'mean':
X['gps_height'] = X['gps_height'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.mean = np.mean(list(X[X['gps_height'] != 0]['gps_height']))
if math.isnan(self.mean):
self.mean = 0
self.column_names = X.columns
return self
def transform(self,X):
if self.method == 'custom':
X['gps_height'] = X['gps_height'].astype(float)
X['latitude'] = X['latitude'].astype(float)
X['longitude'] = X['longitude'].astype(float)
gps_height_transformed = []
for latitude, longitude, gps_height in \
zip(X['latitude'],X['longitude'],X['gps_height']):
radius = self.init_radius
if gps_height == 0:
gps_height_temp = gps_height
while gps_height_temp == 0 and radius <= 2:
df_temp = self.__get_subset_records\
(latitude,longitude,self.df,radius)
gps_height_temp = np.mean(df_temp[df_temp['gps_height']!=0]\
['gps_height'])
if math.isnan(gps_height_temp):
gps_height_temp = 0
radius = self.increment_radius + radius
else:
gps_height_temp = gps_height
gps_height_transformed.append(gps_height_temp)
X['gps_height'] = gps_height_transformed
#self.column_names = list(X.columns)
elif self.method == 'median':
gps_height = np.array(list(X['gps_height']))
gps_height[gps_height == 0] = self.median
#self.column_names = list(X.columns)
#return X[['latitude','longitude','gps_height']]
X['gps_height'] = gps_height
elif self.method == 'mean':
gps_height = np.array(list(X['gps_height']))
gps_height[gps_height == 0] = self.mean
#self.column_names = list(X.columns)
#return X[['latitude','longitude','gps_height']]
X['gps_height'] = gps_height
self.column_names = X.columns
X['gps_height'] = X['gps_height'].astype(float)
X['gps_height'] = X['gps_height'].fillna(0)
return X
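# Illustrative sketch, not part of the original pipeline: a tiny worked example of the
# imputer above on made-up data. The column names follow the class; the values and the
# helper name are assumptions for demonstration only.
def _example_gps_height_imputation():
    df = pd.DataFrame({
        'latitude': [-6.80, -6.81, -6.79, -6.80],
        'longitude': [39.28, 39.29, 39.27, 39.28],
        'gps_height': [120.0, 0.0, 140.0, 0.0],  # zeros are treated as missing
    })
    imputer = GPSHeightImputer(init_radius=0.1, increment_radius=0.3, method='custom')
    imputer.fit(df.copy())
    # rows with gps_height == 0 receive the mean of non-zero neighbours found within
    # the expanding search radius
    return imputer.transform(df.copy())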
# Here will be our strategy to handle gps_height:
# In fit() just save the input data as a data frame \
# with gps_height, lat, long, and gps_height > 0
# In transform(), if gps_height == 0, then
# start at 0.1 radius, and check if there are any non-zero gps_instances.
# If yes, get the average, else, increment search radius
# by 0.3 (0.1 increase corresponds to 11km approximately)
# If nothing is found within an increment of 2, then just ignore.
class LatLongImputer(BaseEstimator, TransformerMixin):
def __init__(self, method='custom'):
self.column_names = []
self.method = method
self.df = pd.DataFrame()
self.long_mean_map = {}
self.lat_mean_map = {}
pass
def __generate_mean_maps(self):
temp_df = self.df[(self.df['latitude'] != -2e-08) & (self.df['longitude'] != 0)]
for geo in ['ward', 'region', 'basin']:
self.long_mean_map[geo] = dict(zip(temp_df.groupby(geo)['longitude'].mean(
).keys(), temp_df.groupby(geo)['longitude'].mean().values))
self.lat_mean_map[geo] = dict(zip(temp_df.groupby(geo)['latitude'].mean(
).keys(), temp_df.groupby(geo)['latitude'].mean().values))
def fit(self, X, y=None):
if self.method == 'mean':
# find mean of all non-zero values
self.mean_lat = np.mean(X[X['latitude'] != -2e-08]['latitude'])
self.mean_long = np.mean(X[X['longitude'] != 0]['longitude'])
elif self.method == 'median':
# find median of all non-zero values
self.median_lat = np.median(X[X['latitude'] != -2e-08]['latitude'])
self.median_long = np.median(X[X['longitude'] != 0]['longitude'])
elif self.method == 'custom':
self.df = X
self.__generate_mean_maps()
self.column_names = ['latitude', 'longitude', 'gps_height']
return self
def transform(self, X):
if self.method == 'mean':
X['latitude'].replace(-2e-08, self.mean_lat, inplace=True)
X['longitude'].replace(0, self.mean_long, inplace=True)
elif self.method == 'median':
X['latitude'].replace(-2e-08, self.median_lat, inplace=True)
X['longitude'].replace(0, self.median_long, inplace=True)
elif self.method == 'custom':
            # use .loc (the original chained assignment was a silent no-op on a copy); ward-level means assumed
            lat_mask, long_mask = X['latitude'] == -2e-08, X['longitude'] == 0
            X.loc[lat_mask, 'latitude'] = X.loc[lat_mask, 'ward'].map(self.lat_mean_map['ward'])
            X.loc[long_mask, 'longitude'] = X.loc[long_mask, 'ward'].map(self.long_mean_map['ward'])
self.column_names = X.columns
return X
# will work the same way as the gps imputer.
class PopulationImputer(BaseEstimator, TransformerMixin):
def __init__(self, method='custom'):
        self.column_names = []
self.method = method
self.df = pd.DataFrame()
def fit(self, X, y=None):
if self.method == 'mean':
self.mean = np.mean(X[X['population'] > 0]['population'])
elif self.method == 'median':
self.median = np.median(X[X['population'] > 0]['population'])
elif self.method == 'custom':
self.df['population'] = X[X['population'] > 0]['population']
self.column_names = ['latitude', 'longitude', 'population']
return self
def transform(self, X):
X.fillna(0, inplace=True)
if self.method == 'mean':
X['population'].replace(0, self.mean, inplace=True)
elif self.method == 'median':
X['population'].replace(0, self.median, inplace=True)
elif self.method == 'custom':
pass
self.column_names = ['latitude', 'longitude', 'population']
return X[['latitude', 'longitude', 'population']]
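# Illustrative sketch, not part of the original pipeline: the 'median' strategy of the
# imputer above on made-up data; the helper name and values are assumptions.
def _example_population_imputation():
    df = pd.DataFrame({
        'latitude': [-6.80, -6.81, -6.79],
        'longitude': [39.28, 39.29, 39.27],
        'population': [250.0, 0.0, 150.0],  # zero is treated as missing
    })
    imputer = PopulationImputer(method='median')
    imputer.fit(df.copy())
    # the zero population is replaced by the median of the non-zero values (200.0)
    return imputer.transform(df.copy())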
class ConstructionYearTransformer(BaseEstimator, TransformerMixin):
def __init__(self,method = 'custom'):
self.column_names = []
#self.init_radius = init_radius
#self.increment_radius = increment_radius
self.method = method
pass ##Nothing else to do
def fit(self, X, y=None):
X['construction_year'] = X['construction_year'].astype(float)
if self.method == 'custom':
year_recorded = X[X['construction_year'] > 0]\
['date_recorded'].\
apply(lambda x: int(x.split("-")[0]))
year_constructed = X[X['construction_year'] > 0]['construction_year']
self.median_age = np.median(year_recorded - year_constructed)
self.column_names = ['age']
return self
if self.method == 'median':
X['construction_year'] = X['construction_year'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.median = \
np.median(list(X[X['construction_year'] != 0]['construction_year']))
if math.isnan(self.median):
self.median = 0
self.column_names = ['construction_year']
return self
if self.method == 'mean':
X['construction_year'] = X['construction_year'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.mean = np.mean(list(X[X['construction_year'] != 0]['construction_year']))
if math.isnan(self.mean):
self.mean = 0
self.column_names = ['construction_year']
return self
if self.method == 'ignore':
self.column_names = ['construction_year']
return self
def transform(self,X):
if self.method == 'custom':
year_recorded = list(X['date_recorded'].apply(lambda x: int(x.split("-")[0])))
year_constructed = list(X['construction_year'])
age = []
for i,j in enumerate(year_constructed):
if j == 0:
age.append(self.median_age)
else:
temp_age = year_recorded[i] - year_constructed[i]
if temp_age < 0:
temp_age = self.median_age
age.append(temp_age)
X['age'] = age
self.column_names = ['age']
#self.column_names = X.columns
return X[['age']]
if self.method == 'median':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
construction_year = np.array(list(X['construction_year']))
construction_year[construction_year == 0] = self.median
self.column_names = ['construction_year']
X['construction_year'] = construction_year
return X[['construction_year']]
if self.method == 'mean':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
construction_year = np.array(list(X['construction_year']))
construction_year[construction_year == 0] = self.mean
self.column_names = ['construction_year']
X['construction_year'] = construction_year
return X[['construction_year']]
if self.method == 'ignore':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
self.column_names = ['construction_year']
return X[['construction_year']]
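# Illustrative sketch, not part of the original pipeline: the 'custom' strategy above
# turns construction_year into an age feature; dates and years here are made up.
def _example_construction_year_transform():
    df = pd.DataFrame({
        'date_recorded': ['2011-03-14', '2013-02-04', '2012-10-01'],
        'construction_year': [2000.0, 0.0, 2009.0],  # zero is treated as missing
    })
    transformer = ConstructionYearTransformer(method='custom')
    transformer.fit(df.copy())
    # known years become recorded_year - construction_year; missing ones fall back to
    # the median observed age learned in fit
    return transformer.transform(df.copy())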
# take columns and turn them into 3 numerical columns representing percent of target
class HighCardTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
self.column_names = []
        self.df = pd.DataFrame()
"""
This module contains functions which extract features from git log entries.
"""
import re
import numpy as np
import pandas as pd
from collections import defaultdict
from .gitcmds import get_git_log, get_bugfix_commits, link_fixes_to_bugs, \
trim_hash
def split_commits(whole_log):
"""Split the output of git log into separate entries per commit.
Parameters
----------
whole_log: str
A string containing the entire git log.
Returns
-------
list(str)
A list of log entries, with each commit as its own string.
"""
lines = whole_log.splitlines()
# find the indices which separate each commit's entry
commit_line_idxs = [i for i, line in enumerate(lines)
if re.match(r'^commit \w{40}$', line)]
# split the lines from the whole log into subsets for each log entry
commit_lines = np.array_split(lines, commit_line_idxs)
return ["\n".join(arr) for arr in commit_lines[1:]]
def parse_commit(commit_str):
"""Extract features from the text of a commit log entry.
Parameters
----------
commit_str: str
The text of a commit log entry.
Returns
-------
feats: defaultdict
A dictionary of feature values.
"""
feats = defaultdict(lambda: None)
lines = commit_str.splitlines()
# parse the commit line
commit_line = [line for line in lines if line.startswith('commit')][0]
feats['hash'] = \
trim_hash(re.match(r'commit (\w{40})', commit_line).group(1))
# NOTE: skip string features for now because the one-hot encoding is a pain
# parse the author line
# author_line = [line for line in lines if line.startswith('Author:')][0]
# author_matches = re.match(r'Author: (.+) <(.+)>', author_line)
# feats['user'] = author_matches.group(1)
# feats['email'] = author_matches.group(2)
# parse the date line
time_line = [line for line in lines if line.startswith('Date:')][0]
timestamp = re.match(r'Date: (.*)', time_line).group(1)
# TODO: fix the hardcoded timezone
    created_at = pd.to_datetime(timestamp, utc=True)
from __future__ import division
import numpy as np
import pandas as pd
import pickle
import os
from math import ceil
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.metrics import r2_score
warnings.simplefilter("ignore")
# colors = ["#3366cc", "#dc3912", "#109618", "#990099", "#ff9900"]
colors = sns.color_palette('muted')
labels = ['Remaining', 'First','Last']
def density_plot(df, Accuracy_base, Accuracy_LSTM, Accuracy_NG, save_fig, Out_put_name,model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM, Accuracy_NG]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i],linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.1, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.1, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.ylabel('Density', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('First activities',fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Middle']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i], linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2, alpha = 1)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.0, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
plt.ylabel('Density', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('Remaining activities',fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_duration_error(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('First Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Remaining']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (User-level)', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('Remaining Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_not_seperate_mid_first(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(7, 7))
ax1 = plt.subplot(1, 1, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['all']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
med = data.mean()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18) #
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def data_process_continuous(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])/3600
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])/3600
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct']))/data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp,accuracy_all
def calculate_error(result_df):
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
######
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration']) ** 2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs'] / result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
if len(result_df) > 0:
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
else:
R_sq = None
return RMSE, MAPE, MAE, R_sq
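# Illustrative sketch, not part of the original analysis: calculate_error on a toy
# result frame; durations are in seconds and the values are made up.
def _example_calculate_error():
    toy = pd.DataFrame({
        'Predict_duration': [3600.0, 7200.0, 1800.0],
        'Ground_truth_duration': [3000.0, 7000.0, 2400.0],
    })
    # returns (RMSE, MAPE, MAE, R_sq) over the three toy activities
    return calculate_error(toy)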
def r_sq_for_two_parts(data,y_mean):
data['RES'] = (data['Ground_truth_duration'] - data['Predict_duration'])**2
data['TOT'] = (data['Ground_truth_duration'] - y_mean)**2
R_sq = 1 - sum(data['RES'])/sum(data['TOT'])
return R_sq
def data_process_continuous_R_sq(data):
_, _, _, R_sq_all = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
mean_y = np.mean(data['Ground_truth_duration'])
R_sq_first = r_sq_for_two_parts(data_first, mean_y)
if len(data_Remaining)>0:
R_sq_Remaining = r_sq_for_two_parts(data_Remaining, mean_y)
else:
R_sq_Remaining = None
return R_sq_first, R_sq_Remaining, R_sq_all
def data_process_continuous_RMSE(data):
RMSE_all, _, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
RMSE_first, _, _, R_sq_first = calculate_error(data_first)
RMSE_Remaining, _, _, R_sq_Remaining = calculate_error(data_Remaining)
return RMSE_first, RMSE_Remaining, RMSE_all
def data_process_continuous_MAPE(data):
_, MAPE_all, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
_, MAPE_first, _, R_sq_first = calculate_error(data_first)
_, MAPE_Remaining, _, R_sq_Remaining = calculate_error(data_Remaining)
return MAPE_first, MAPE_Remaining, MAPE_all
def data_process_discrete(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct'])) / data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all
def generate_accuracy_file(individual_ID_list, output_fig, duration_error):
error_list=[]
total=0
error_Remaining = pd.DataFrame({'Remaining':[]})
error_first = pd.DataFrame({'first':[]})
error_Remaining_base = pd.DataFrame({'Remaining':[]})
error_first_base = pd.DataFrame({'first':[]})
Accuracy = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_base = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_LSTM = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
Accuracy_NG = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
# data
Card_ID_used = []
# individual_ID_list = individual_ID_list[0:80]
#############IOHMM
for Card_ID in individual_ID_list:
# if output_fig == 'duration':
# file_name = data_path + 'results/result_' + str(Card_ID) + 'test' + '.csv'
# else:
# file_name = data_path + 'results/result_Location_' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_con_dur+loc_' + str(Card_ID) + 'test' + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for IOHMM')
continue
else:
Card_ID_used.append(Card_ID)
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy['first'].append(R_sq_first)
Accuracy['Remaining'].append(R_sq_Remaining)
Accuracy['all'].append(R_sq_all)
Accuracy['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy['first'].append(Accuracy_first_temp)
Accuracy['Remaining'].append(Accuracy_temp)
Accuracy['all'].append(accuracy_all)
Accuracy['Card_ID'].append(Card_ID)
# data
############## LSTM
Card_ID_used_for_base = list(set(Card_ID_used))
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_LSTM' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_LSTM_con_dur' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_con_dur_' + str(Card_ID) + '.csv'
else:
file_name = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_location_' + str(Card_ID) + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for LSTM')
continue
        data = pd.read_csv(file_name)
import os
import shutil
from joblib import Parallel, delayed
from .data_file import DataFile
from .helpers import (proc_file_input, mp_consol_save, wrap_load_func)
from pandas.util._decorators import doc
from .Dataset import _shared_docs, _sip_docs
import numpy as np
import pandas as pd
def get_file_mapping(self, cols=None):
    '''This function is used to access the
    up-to-date file mapping. It can also be used
    to request only the subset of the file mapping
    of interest, to save on how much info is passed around.
Returns
--------
file_mapping : dict
Return a dictionary with keys as
        integers loaded in the Dataset referring
to Data Files.
See Also
--------
to_data_file : Cast existing columns to type Data File.
add_data_files : Method for adding new data files
'''
# Make sure up to date first
self._check_file_mapping()
# If a subset of cols passed,
# create new subset of file_mapping to return
if cols is not None:
# Get just the data files in scope
u_values = np.unique(np.array(self[cols]))
# Return relevant subset only - don't include any NaN in subset
return {u: self.file_mapping[u] for u in u_values if not pd.isnull(u)}
return self.file_mapping
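# Hedged usage sketch (not part of the original source): assuming `data` is a Dataset
# with a data file column 'feat1' already loaded, a trimmed mapping could be requested
# with
#
#     mapping = data.get_file_mapping(cols=['feat1'])
#
# which returns only the integer -> DataFile entries referenced by 'feat1'.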
@doc(load_func=_shared_docs['load_func'], inplace=_shared_docs['inplace'])
def add_data_files(self, files, file_to_subject,
load_func=np.load, inplace=False):
'''This method allows adding columns of type
'data file' to the Dataset class.
Parameters
----------
files : dict
| This argument specifies the files to be loaded as :ref:`data_files`.
Files must be passed as a python dict where
each key refers to the name of that feature / column of data files
to load, and the value is either a list-like of
str file paths, or a single globbing str which will
be used to determine the files.
| In addition to this parameter, you must also pass a
python function to the file_to_subject param,
which specifies how to convert from passed
file path, to a subject name.
file_to_subject : python function, dict of or 'auto'
| This parameter represents how the subject name should
be determined from the passed file paths. This
parameter can be passed any python function, where
the first argument on the function takes a full
file path and returns a subject name.
| This parameter should be passed as either a single function
or argument to be used for all columns, or as a dictionary
corresponding to the passed files dictionary in the case
that each column requires a different function mapping path
to subject. If just one function is passed, it will be used
for to load all dictionary entries. For example:
| You may also pass the custom str 'auto' to
specify that the subject name should be the base
file name with the extension removed. For example
if the path is '/some/path/subj16.npy' then the auto
subject will be 'subj16'.
| In the case that the underlying index is a MultiIndex, this
function should be designed to return the subject in correct
tuple form. See Examples below.
{load_func}
{inplace}
See Also
--------
to_data_file : Cast existing columns to type Data File.
get_file_mapping : Returns the raw file mapping.
Examples
---------
Consider the brief example below for loading two fake subjects,
with the files parameter.
::
files = dict()
files['feat1'] = ['f1/subj_0.npy', 'f1/subj_1.npy']
files['feat2'] = ['f2/subj_0.npy', 'f2/subj_1.npy']
This could be matched with file_to_subject as:
::
def file_to_subject_func(file):
subject = file.split('/')[1].replace('.npy', '')
return subject
file_to_subject = file_to_subject_func
# or
file_to_subject = dict()
file_to_subject['feat1'] = file_to_subject_func
file_to_subject['feat2'] = file_to_subject_func
In this example, subjects are loaded as 'subj_0' and 'subj_1',
and they have associated loaded data files 'feat1' and 'feat2'.
Next, we consider an example with fake data.
In this example we will first generate and save some fake data files.
These fake files will correspond to left hemisphere vertex files.
.. ipython:: python
import numpy as np
import os
dr = 'data/fake_surface/'
os.makedirs(dr, exist_ok=True)
# 20 subjects each with 10,242 vertex values
X = np.random.random(size=(20, 10242))
# Save the data as numpy arrays
for x in range(len(X)):
np.save(dr + str(x), X[x])
os.listdir(dr)[:5]
Next, we will use add data files to add these to
a :class:`Dataset`.
.. ipython:: python
data = bp.Dataset()
files = dict()
files['fake_surface'] = dr + '*' # Add * for file globbing
data = data.add_data_files(files=files, file_to_subject='auto')
data.head(5)
Let's also consider lastly a MultiIndex example:
::
# The underlying dataset is indexed by subject and event
data.set_index(['subject', 'event'], inplace=True)
# Only one feature
files = dict()
files['feat1'] = ['f1/s0_e0.npy',
'f1/s0_e1.npy',
'f1/s1_e0.npy',
'f1/s1_e1.npy']
def file_to_subject_func(file):
# This selects the substring
# at the last part seperated by the '/'
# so e.g. the stub, 's0_e0.npy', 's0_e1.npy', etc...
subj_split = file.split('/')[-1]
# This removes the .npy from the end, so
# stubs == 's0_e0', 's0_e1', etc...
subj_split = subj_split.replace('.npy', '')
# Set the subject name as the first part
# and the eventname as the second part
subj_name = subj_split.split('_')[0]
event_name = subj_split.split('_')[1]
# Lastly put it into the correct return style
# This is tuple style e.g., ('s0', 'e0'), ('s0', 'e1')
                ind = (subj_name, event_name)
return ind
'''
if not inplace:
return self._inplace('add_data_files', locals())
# Wrap load func if needed
wrapped_load_func = wrap_load_func(load_func, _print=self._print)
# Init if needed
self._check_file_mapping()
# Get dict of key to files
file_series = proc_file_input(files, file_to_subject, self.index)
# For each column
for file in file_series:
# For each subject, fill in with Data File
series = file_series[file]
self._series_to_data_file(col=file, series=series,
load_func=wrapped_load_func)
@doc(**_sip_docs, load_func=_shared_docs['load_func'])
"""Build mapping between grid cells and population (total, urban, rural)"""
import multiprocessing as mp
import atlite
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd
from vresutils import shapes as vshapes
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_population_layouts')
cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])
grid_cells = cutout.grid_cells()
# nuts3 has columns country, gdp, pop, geometry
# population is given in dimensions of 1e3=k
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
# Indicator matrix NUTS3 -> grid cells
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
    # Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is the identity,
    # but numerical imprecision means it is not exact
Iinv = cutout.indicatormatrix(nuts3.geometry)
countries = np.sort(nuts3.country.unique())
urban_fraction = pd.read_csv(snakemake.input.urban_percent,
header=None, index_col=0,
names=['fraction']).squeeze() / 100.
# fill missing Balkans values
missing = ["AL", "ME", "MK"]
reference = ["RS", "BA"]
average = urban_fraction[reference].mean()
fill_values = pd.Series({ct: average for ct in missing})
urban_fraction = urban_fraction.append(fill_values)
# population in each grid cell
pop_cells = pd.Series(I.dot(nuts3['pop']))
# in km^2
with mp.Pool(processes=snakemake.threads) as pool:
cell_areas = pd.Series(pool.map(vshapes.area, grid_cells)) / 1e6
# pop per km^2
density_cells = pop_cells / cell_areas
# rural or urban population in grid cell
    pop_rural = pd.Series(0., density_cells.index)
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
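# asof returns the last label at or before the key, or NaN if the key precedes the first label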
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
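# argsort should match the argsort of the underlying ndarray values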
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
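# comparisons against a scalar element should yield a plain boolean ndarray identical to comparing the raw values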
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
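# boolean-mask selection (ndarray or list) should keep each selected value addressable via get_loc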
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
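# shift(0) should return the index itself; shift(n, freq) should offset every element by n * freq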
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
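# get_indexer returns -1 for labels missing from the index; 'pad'/'ffill' and 'backfill'/'bfill' behave as aliases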
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
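# slice_locs returns the (start, stop) positions bounding the label range, for both increasing and reversed (decreasing) indexes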
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
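# isin returns a boolean ndarray marking membership; an empty index yields an empty bool-dtype result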
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
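# comparing an Index to an equal-length list of matching values yields an all-True boolean ndarray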
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
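# arithmetic with scalars, ndarrays and Series broadcasts element-wise (coercing to Float64Index for float operands);
# mismatched lengths and datetime operands should raise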
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
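# get_loc should locate NaN like a regular value; slice_locs on an index holding multiple NaNs raises KeyError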
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
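# outer join should return the sorted union plus left/right indexers, with -1 marking positions missing from a side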
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assertTrue(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
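# joining a non-unique index with itself pairs every occurrence of a duplicated label with every other occurrence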
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
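# labels should be stored with the smallest integer dtype able to hold the level codes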
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't raise a scalar-data error; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't raise a scalar-data error; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't raise a scalar-data error; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
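# levels, labels and names retrieved from a MultiIndex should reject item assignment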
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
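# inplace set_levels/set_labels must invalidate the cached _tuples so .values reflects the new metadata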
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels should be (at least, shallow copied)
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# labels don't matter which way they were copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# names don't matter which way they were copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
index = | Index([2, 2, 2, 2]) | pandas.core.index.Index |
import argparse
import json
import os
from collections import defaultdict
from datetime import datetime as dt
from datetime import timedelta
from datetime import time
from tabulate import tabulate
from time import sleep, perf_counter
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
from tqdm import tqdm
from scoring import get_score
from scrape_data import mkdir
from scrape_schedule import get_yearly_schedule
# %%
def main():
args = parse_args()
optimizer = LineupOptimizer(
year=args.year,
week=args.week,
league=args.league,
)
optimizer.get_optimal_lineup(
league=args.league,
days=args.days,
start=args.starttime,
end=args.endtime,
type=args.type,
risk=args.risk,
n_lineups=args.n_lineups,
mppt=args.max_players,
stack=args.stack,
result=args.res,
save=args.save,
verbose=args.verbose,
)
def parse_args():
"""Collect settings from command line and set defaults"""
parser = argparse.ArgumentParser()
parser.add_argument('-y', '--year', type=int, help='Year of season')
parser.add_argument('-w', '--week', type=int, help='Week of season')
parser.add_argument('-d', '--days', help='Day(s) of games')
parser.add_argument('-ts', '--starttime', help='Earliest game start time to include')
parser.add_argument('-te', '--endtime', help='Latest game start time to include')
parser.add_argument('-l', '--league', help='FanDuel, DraftKings, etc.')
parser.add_argument('-n', '--n_lineups', type=int, help='Number of lineups')
parser.add_argument('-m', '--max_players', type=int, help='Max plyrs/team')
parser.add_argument('-st', '--stack', action='store_true', help='Stack?')
parser.add_argument('-r', '--res', action='store_true', help='See result?')
parser.add_argument('-s', '--save', action='store_true', help='Save?')
parser.add_argument('-v', '--verbose', action='store_true', help='Print?')
parser.add_argument('-t', '--type', help="'actual' or 'proj'")
parser.add_argument('-ri', '--risk', type=float, help="-1 to 1")
today = dt.utcnow()
default_year = today.year if today.month > 7 else today.year - 1
season_start = { # Date of Thursday night game starting each season.
2015: '9/10/2015',
2016: '9/8/2016',
2017: '9/7/2017',
2018: '9/6/2018',
2019: '9/5/2019',
}[default_year]
# Default week is the current NFL season week, starting on Tuesdays.
# Before the season default is week 1, and after the season it is 17.
default_week = int(np.ceil(
(today-pd.to_datetime(season_start)+timedelta(days=2)).total_seconds()
/ (3600 * 24 * 7)
))
default_week = max(1, min(17, default_week))
# set default arguments
parser.set_defaults(
year=default_year,
week=default_week,
days='Thu Sat Sun Mon',
starttime='12:00AM',
endtime='11:59PM',
league='FanDuel',
type='proj',
risk=0,
n_lineups=1,
max_players=4,
stack=False,
res=False,
save=False,
verbose=False,
)
args = parser.parse_args()
return args
class LineupOptimizer:
def __init__(self, year, week, league):
self.year = year
self.week = week
self.league = league
self.schedule = get_yearly_schedule([year], [week])[year]
# self.schedule = pd.read_csv('week1schedule.csv', index_col=0)
# gametimes = self.schedule['gametime'].values
# times = [time(int(x.split(':')[0]), int(x.split(':')[1]))
# for x in gametimes]
# self.schedule['gametime'] = times
self.data, self.results = self._load_data()
self.budg = {'FanDuel': 60000, 'DraftKings': 50000}[self.league]
def __repr__(self):
return f'Lineup Optimizer for week #{self.week}, {self.year}'
def _read_file(self, pos):
filepath = f'../data/{self.year}/{self.week}/{pos}/'
srcs = 'FLOOR PROJ CEIL'.split()
dfs = []
for src in srcs:
fid = filepath+src+'.csv'
df = pd.read_csv(fid)
df[src] = get_score(df, pos, self.league, type='proj')
df = df[['Player', 'Team', src]].set_index(['Player', 'Team'])
dfs.append(df)
df = pd.concat(dfs, axis=1).reset_index()
df['pos'] = pos
df.columns = [col.lower() for col in df.columns]
return df
def _load_data(self):
positions = 'QB RB WR TE DST'.split()
player_dfs = {}
results_dfs = {}
for pos in positions:
filepath = f'../data/{self.year}/{self.week}/{pos}/'
player_dfs[pos] = self._read_file(pos)
try:
results_dfs[pos] = pd.read_csv(filepath+'STATS.csv')
except:
pass
return player_dfs, results_dfs
def _get_input_data(self, teams, type='proj'):
positions = 'QB RB WR TE DST'.split()
dfs = []
for pos in positions:
df = self.data[pos].copy()
if type == 'actual':
df1 = self.results[pos].copy()
df1['actual'] = get_score(df1, pos, self.league, 'actual')
df1.columns = [x.lower() for x in df1.columns]
df = df.set_index('player team pos'.split()).join(
df1.set_index('player team pos'.split())
).reset_index()
filepath = f'../data/{self.year}/{self.week}/{pos}/'
costs_df = pd.read_csv(filepath+self.league+'.csv')
costs_df.columns = 'player team salary'.split()
if pos == 'DST':
jointdf = df.set_index('team').join(
costs_df.set_index('team')['salary'],
how='left',
sort=False,
rsuffix='_').dropna().reset_index()
cols = list(jointdf)
cols[0], cols[1] = 'player', 'team'
jointdf = jointdf[cols].copy()
else:
jointdf = df.set_index(['player', 'team']).join(
costs_df.set_index(['player', 'team'])['salary'],
how='left', sort=False).dropna().reset_index()
dfs.append(jointdf)
fulldf = pd.concat(dfs, axis=0, sort=False).reset_index(drop=True)
fulldf1 = fulldf.loc[fulldf['team'].isin(teams)].copy()
pos_dummies = pd.get_dummies(fulldf1['pos'])
team_dummies = | pd.get_dummies(fulldf1['team']) | pandas.get_dummies |
import os
import glob
import json
import argparse
import datetime
import numpy as np
import pandas as pd
def get_args():
parser = argparse.ArgumentParser(description="spotify")
parser.add_argument("--path", type=str, default="upload", help="input files")
parser.add_argument("--timeZone", type=str, default="UTC", help="time zone")
parser.add_argument("--files", nargs="+")
return parser.parse_args()
def ms_to_hour(ms, hour=False):
seconds = (ms / 1000) % 60
minutes = (ms / (1000 * 60)) % 60
hours = (ms / (1000 * 60 * 60)) % 24
if hour:
return "%02d:%02d:%02d" % (hours, minutes, seconds)
return "%02d:%02d" % (minutes, seconds)
def top3(spotify_df):
top_monthly_df = pd.DataFrame(spotify_df)
top_monthly_df.set_index("endTime", drop=False, inplace=True)
top_monthly_df = top_monthly_df.resample("M").artistName.apply(lambda x: x.value_counts().head(3)).reset_index()
top_monthly_df.set_index("endTime", drop=True, inplace=True)
top_monthly_df = top_monthly_df.rename(columns={"artistName": "count", "level_1": "artistName"})
top_monthly_df["endTime"] = top_monthly_df.index.tolist()
top_monthly_df = top_monthly_df.reset_index(drop=True)
top_monthly_df.to_csv("df/top_monthly_df.csv", index=False)
def intervals(spotify_df):
interval = spotify_df["msPlayed"].tolist()
interval_df = pd.DataFrame(interval, columns=["Duration"])
interval_range = np.arange(0, max(interval) + 120000, 120000)
interval_df = interval_df.groupby(pd.cut(interval_df["Duration"], interval_range)).count()
interval_new_df = pd.DataFrame({
"intervat": [str(i) for i in interval_df.index.tolist()],
"count": interval_df["Duration"].tolist()
})
interval_new_df.to_csv("df/interval_df.csv", index=False)
def make_tracks(spotify_df):
artists = spotify_df["artistName"].tolist()
tracks = spotify_df["trackName"].tolist()
artists_tracks = [artists, tracks]
artists_tracks_list = list(map(" - ".join, zip(*artists_tracks)))
artists_tracks_counts = dict()
for artists_track in artists_tracks_list:
artists_tracks_counts[artists_track] = artists_tracks_counts.get(artists_track, 0) + 1
top_artists_tracks = np.array(sorted(artists_tracks_counts.items(), key=lambda item: item[1], reverse=True))
top_artists_tracks_df = spotify_df.groupby(["artistName", "trackName"]).size().reset_index(name="playCount")
top_artists_tracks_df = top_artists_tracks_df.sort_values(by=["playCount"], ascending=False)
top_artists_tracks_df = top_artists_tracks_df.reset_index(drop=True)
top_artists_tracks_df.to_csv("df/top_artists_tracks_count_df.csv", index=False)
top_artists_tracks_df = spotify_df.groupby(["artistName", "trackName"]).sum().reset_index()
top_artists_tracks_df["msPlayed"] = top_artists_tracks_df["msPlayed"].apply(lambda x: ms_to_hour(x, hour=True))
top_artists_tracks_df = top_artists_tracks_df.sort_values(by=["msPlayed"], ascending=False)
top_artists_tracks_df = top_artists_tracks_df.rename(columns={"msPlayed": "playTime"})
top_artists_tracks_df = top_artists_tracks_df.reset_index(drop=True)
top_artists_tracks_df.to_csv("df/top_artists_tracks_playtime_df.csv", index=False)
def make_artists(spotify_df):
artists = spotify_df["artistName"].tolist()
artists_counts = dict()
for artist in artists:
artists_counts[artist] = artists_counts.get(artist, 0) + 1
top_artist = np.array(sorted(artists_counts.items(), key=lambda item: item[1], reverse=True))
top_artist_df = pd.DataFrame(top_artist, columns=["Artist", "Count"])
top_artist_df.to_csv("df/top_artist_df.csv", index=False)
top_artist_time_df = spotify_df[["artistName", "msPlayed"]].copy().groupby(["artistName"]).sum().reset_index()
top_artist_time_df["msPlayed"] = top_artist_time_df["msPlayed"].apply(lambda x: ms_to_hour(x, hour=True))
top_artist_time_df = top_artist_time_df.sort_values(by=["msPlayed"], ascending=False)
top_artist_time_df = top_artist_time_df.rename(columns={"msPlayed": "playTime"})
top_artist_time_df = top_artist_time_df.reset_index(drop=True)
top_artist_time_df.to_csv("df/top_artist_time_df.csv", index=False)
def distributions(spotify_df):
time_lists = [str(m).split(" ")[1][:-9] for m in spotify_df["endTime"].tolist()]
spotify_hour_df = pd.DataFrame(data={"hour": time_lists})
spotify_hour_df['hour'] = pd.to_datetime(spotify_hour_df['hour'], format='%H:%M')
spotify_hour_df.set_index('hour', drop=False, inplace=True)
spotify_hour_df = spotify_hour_df['hour'].groupby(pd.Grouper(freq='60Min')).count()
spotify_hour_new_df = pd.DataFrame({
"hours": [str(h).split(" ")[1][:-3] for h in spotify_hour_df.index],
"count": spotify_hour_df.tolist()
})
spotify_hour_new_df.to_csv("df/spotify_hour_df.csv", index=False)
monthly_df = pd.DataFrame(spotify_df["endTime"], columns=["endTime"])
monthly_df.set_index("endTime", drop=False, inplace=True)
monthly_df = monthly_df.resample('M').count().dropna()
xs = [str(x).split("T")[0] for x in monthly_df.index.values]
monthly_df.index = xs
monthly_new_df = pd.DataFrame({
"month": monthly_df.index.tolist(),
"count": monthly_df["endTime"].tolist()
})
monthly_new_df.to_csv("df/monthly_df.csv", index=False)
def nonstop_playing(spotify_df):
end_times = [str(t)[:-9] for t in spotify_df["endTime"].tolist()]
play_times = [ms_to_hour(t, True) for t in spotify_df["msPlayed"].tolist()]
nonstop_records = []
nonstop = False
for i in range(len(end_times)):
end_datetime = datetime.datetime.strptime(end_times[i], "%Y-%m-%d %H:%M")
play_datetime = datetime.datetime.strptime(play_times[i], "%H:%M:%S")
play_delta = datetime.timedelta(hours=play_datetime.hour, minutes=play_datetime.minute, seconds=play_datetime.second)
start_datetime = end_datetime - play_delta
start_time1 = "{:%Y-%m-%d %H:%M}".format(start_datetime - datetime.timedelta(minutes=1))
start_time2 = "{:%Y-%m-%d %H:%M}".format(start_datetime)
start_time3 = "{:%Y-%m-%d %H:%M}".format(start_datetime + datetime.timedelta(minutes=1))
start_times = [start_time1, start_time2, start_time3]
if i == 0:
nonstop_records.append([start_time2, end_times[i], play_datetime, 1])
elif end_times[i - 1] in start_times:
if nonstop:
nonstop_records[-1][1] = end_times[i]
nonstop_records[-1][2] += play_delta
nonstop_records[-1][3] += 1
nonstop = True
else:
nonstop_records.append([start_time2, end_times[i], play_datetime, 1])
nonstop = False
for i in range(len(nonstop_records)):
nonstop_records[i][2] = "{:%H:%M:%S}".format(nonstop_records[i][2])
nonstop_records = np.array(nonstop_records)
nonstop_play_df = pd.DataFrame({
"startTime": nonstop_records[:, 0],
"endTime": nonstop_records[:, 1],
"playTime": nonstop_records[:, 2],
"trackCount": nonstop_records[:, 3]
})
nonstop_play_df.to_csv("df/nonstop_play_df.csv", index=False)
def main():
args = get_args()
os.makedirs("df", exist_ok=True)
spotify = []
try:
for history_file in sorted(args.files):
with open(args.path + history_file, encoding="utf8") as f:
spotify.extend(json.load(f))
except:
pass
try:
spotify_df = | pd.DataFrame.from_records(spotify) | pandas.DataFrame.from_records |
# %matplotlib inline
import os, time, pickle, argparse
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
from scipy.stats import beta
torch.set_printoptions(threshold=10000)
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser(description='RSAutoML')
parser.add_argument('--Train_Method', type=str, default='AutoML', help='options: AutoML, Supervised')
parser.add_argument('--Val_Type', type=str, default='last_batch', help='options: last_batch, last_random')
parser.add_argument('--Loss_Type', type=str, default='MSE_sigmoid', help='options: MSE_sigmoid MSE_no_sigmoid BCEWithLogitsLoss CrossEntropyLoss')
parser.add_argument('--Data_Set', type=str, default='ml-20m', help='options: ml-20m ml-latest')
parser.add_argument('--Dy_Emb_Num', type=int, default=2, help='options: 1, 2')
args = parser.parse_args()
Model_Gpu = torch.cuda.is_available()
device = torch.device('cuda:0' if Model_Gpu else 'cpu')
DATA_PATH = './data'
DATA_SET = args.Data_Set
Batch_Size = 500 # batch size
LR_model = 0.001 # learning rate
LR_darts = 0.0001 # learning rate
Epoch = 1 # train epoch
Beta_Beta = 20 # beta for Beta distribution
H_alpha = 0 # for nn.KLDivLoss 0.001
if DATA_SET == 'ml-20m':
Train_Size = 15000000 # training dataset size
elif DATA_SET == 'ml-latest':
Train_Size = 22000000 # training dataset size
Test_Size = 5000000 # training dataset size
Emb_Size = [2, 4, 8, 16, 64, 128] # 1,2,4,8,16,32,64,128,256,512
fixed_emb_size = sum(Emb_Size)
Val_Type = args.Val_Type # last_batch last_random
Dy_Emb_Num = args.Dy_Emb_Num
Loss_Type = args.Loss_Type # MSE_sigmoid MSE_no_sigmoid BCEWithLogitsLoss CrossEntropyLoss
print('\n****************************************************************************************\n')
print('os.getpid(): ', os.getpid())
if torch.cuda.is_available():
print('torch.cuda: ', torch.cuda.is_available(), torch.cuda.current_device(), torch.cuda.device_count(), torch.cuda.get_device_name(0), torch.cuda.device(torch.cuda.current_device()))
else:
print('GPU is not available!!!')
print('Train_Size: ', Train_Size)
print('Test_Size: ', Test_Size)
print('fixed_emb_size:', fixed_emb_size)
print('Loss_Type: ', Loss_Type)
print('Val_Type: ', Val_Type)
print('Beta_Beta: ', Beta_Beta)
print('H_alpha: ', H_alpha)
print('LR_model: ', LR_model)
print('LR_darts: ', LR_darts)
print('\n****************************************************************************************\n')
def load_data():
train_features, test_features, train_target, test_target \
= pickle.load(open('{}/{}_TrainTest_{}_{}.data'.format(DATA_PATH, DATA_SET, Train_Size, Output_Dim), mode='rb'))
test_features, test_target = test_features[:Test_Size], test_target[:Test_Size]
genome_scores_dict = pickle.load(open('./{}/{}_GenomeScoresDict.data'.format(DATA_PATH, DATA_SET), mode='rb'))
train_feature_data = pd.DataFrame(train_features, columns=['userId', 'movieId', 'user_frequency', 'movie_frequency'])
test_feature_data = pd.DataFrame(test_features, columns=['userId', 'movieId', 'user_frequency', 'movie_frequency'])
User_Num = max(train_feature_data['userId'].max() + 1, test_feature_data['userId'].max() + 1) # 138494
Movie_Num = max(train_feature_data['movieId'].max() + 1, test_feature_data['movieId'].max() + 1) # 131263
max_user_popularity = max(train_feature_data['user_frequency'].max()+1, test_feature_data['user_frequency'].max()+1)
max_movie_popularity = max(train_feature_data['movie_frequency'].max() + 1, test_feature_data['movie_frequency'].max() + 1)
# print('train_feature_data\n', train_feature_data)
# print(train_feature_data.info())
# print(train_feature_data.describe())
return train_features, test_features, train_target, test_target, genome_scores_dict, \
train_feature_data, test_feature_data, len(train_features), len(test_features), \
User_Num, Movie_Num, max_user_popularity, max_movie_popularity
def Batch_Losses(Loss_Type, prediction, target):
if Loss_Type == 'MSE_sigmoid':
return nn.MSELoss(reduction='none')(nn.Sigmoid()(prediction), target)
elif Loss_Type == 'MSE_no_sigmoid':
return nn.MSELoss(reduction='none')(prediction, target)
elif Loss_Type == 'BCEWithLogitsLoss':
return nn.BCEWithLogitsLoss(reduction='none')(prediction, target)
elif Loss_Type == 'CrossEntropyLoss':
return nn.CrossEntropyLoss(reduction='none')(prediction, target)
else:
print('No such Loss_Type.')
def Batch_Accuracies(Loss_Type, prediction, target):
with torch.no_grad():
if Loss_Type == 'MSE_sigmoid':
predicted = 1 * (torch.sigmoid(prediction).data > 0.5)
elif Loss_Type == 'MSE_no_sigmoid':
predicted = 1 * (prediction > 0.5)
elif Loss_Type == 'BCEWithLogitsLoss':
predicted = 1 * (torch.sigmoid(prediction).data > 0.5)
elif Loss_Type == 'CrossEntropyLoss':
_, predicted = torch.max(prediction, 1)
else:
print('No such Loss_Type.')
Batch_Accuracies = 1 * (predicted == target)
Batch_Accuracies = list(Batch_Accuracies.detach().cpu().numpy())
return Batch_Accuracies
def Beta(length, popularity, be=10):
x = [i/length for i in range(length+1)]
cdfs = [beta.cdf(x[i+1], popularity, be) - beta.cdf(x[i], popularity, be) for i in range(length)]
return cdfs
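# Added sanity sketch (illustrative; the length/popularity values are assumed):
# the per-slot Beta CDF increments returned by Beta() partition [0, 1], so they
# should sum to ~1 for any positive popularity and be.
_demo_cdfs = Beta(length=6, popularity=3, be=Beta_Beta)
assert abs(sum(_demo_cdfs) - 1.0) < 1e-8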
class RS_MLP(nn.Module):
def __init__(self, Output_Dim, Dynamic_Emb_Num):
super(RS_MLP, self).__init__()
self.emb_user = nn.Embedding(num_embeddings=User_Num, embedding_dim=fixed_emb_size)
self.emb_movie = nn.Embedding(num_embeddings=Movie_Num, embedding_dim=fixed_emb_size)
self.bn_user = nn.BatchNorm1d(fixed_emb_size)
self.bn_movie = nn.BatchNorm1d(fixed_emb_size)
self.tanh = nn.Tanh()
self.movie_transfrom = nn.Sequential( # nn.BatchNorm1d(1128),
nn.Linear(1128, 512),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Linear(512, fixed_emb_size))
self.transfrom = nn.Sequential(
nn.BatchNorm1d(fixed_emb_size * 2),
nn.Linear(fixed_emb_size * 2, 512),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Linear(512, Output_Dim))
self.den = Dynamic_Emb_Num
def forward(self, userId, movieId, movie_vec):
user_emb = self.emb_user(userId)
movie_emb = None if self.den == 1 else self.emb_movie(movieId)
# v_user = sum([torch.reshape(u_weight[:, i], (len(u_weight), -1)) * self.tanh(self.bn_user(self.W_user[i](user_emb[:,Emb_Split[i]:Emb_Split[i+1]]))) for i in range(len(Emb_Size))])
# v_movie = sum([torch.reshape(m_weight[:, i], (len(m_weight), -1)) * self.tanh(self.bn_movie(self.W_movie[i](movie_emb[:,Emb_Split[i]:Emb_Split[i+1]]))) for i in range(len(Emb_Size))]) if self.den == 2 else self.movie_transfrom(movie_vec)
v_user = user_emb
v_movie = self.movie_transfrom(movie_vec) if self.den == 1 else movie_emb
user_movie = torch.cat((v_user, v_movie), 1)
return self.transfrom(user_movie)
def update_RS(index, features, Len_Features, target, mode):
""" Update RS's embeddings and NN """
global train_sample_loss, train_sample_accuracy
index_end = index + Batch_Size
if index_end >= Len_Features:
batch_train = features[index:Len_Features]
batch_train_target = target[index:Len_Features]
else:
batch_train = features[index:index_end]
batch_train_target = target[index:index_end]
userId = torch.tensor(batch_train[:, 0], requires_grad=False).to(device)
movieId = torch.tensor(batch_train[:, 1], requires_grad=False).to(device)
movie_vec = torch.tensor([genome_scores_dict[str(batch_train[:, 1][i])] for i in range(len(batch_train[:, 1]))],
requires_grad=False).to(device) if Dy_Emb_Num == 1 else None
batch_train_target = torch.tensor(batch_train_target,
dtype=torch.int64 if Loss_Type == 'CrossEntropyLoss' else torch.float32,
requires_grad=False).to(device)
rating = model(userId, movieId, movie_vec)
rating = rating.squeeze(1).squeeze(1) if Loss_Type == 'CrossEntropyLoss' else rating.squeeze(1)
batch_losses = Batch_Losses(Loss_Type, rating, batch_train_target)
loss = sum(batch_losses)
batch_accuracies = Batch_Accuracies(Loss_Type, rating, batch_train_target)
# accuracy = sum(batch_accuracies) / len(batch_train_target)
# print('loss3', loss, '\naccuracy', accuracy)
train_sample_loss += list(batch_losses.detach().cpu().numpy())
losses[mode].append(loss.detach().cpu().numpy())
train_sample_accuracy += batch_accuracies
accuracies[mode].append((sum(batch_accuracies), len(batch_train_target)))
optimizer_model.zero_grad()
loss.backward()
optimizer_model.step()
if __name__ == "__main__":
Output_Dim = 5 if Loss_Type == 'CrossEntropyLoss' else 1
train_features, test_features, train_target, test_target, genome_scores_dict, \
train_feature_data, test_feature_data, Len_Train_Features, Len_Test_Features, \
User_Num, Movie_Num, max_user_popularity, max_movie_popularity = load_data()
# Len_Train_Features, Len_Test_Features = 100000, 100000
# Len_Train_Features = 10000
train_feature_data, test_feature_data = train_feature_data[:Len_Train_Features], test_feature_data[:Len_Test_Features]
model = RS_MLP(Output_Dim, Dy_Emb_Num)
model.to(device)
if Model_Gpu:
print('\n========================================================================================\n')
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
print('\n========================================================================================\n')
t0 = time.time()
optimizer_model = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LR_model, weight_decay=0)
losses = {'train': [], 'test': []}
accuracies = {'train': [], 'test': []}
train_sample_loss = list()
train_sample_accuracy = list()
print('\n******************************************Train******************************************\n')
for epoch_i in range(Epoch):
#############################train#############################
index = 0
while index < Len_Train_Features:
update_RS(index, train_features, Len_Train_Features, train_target, mode='train')
if len(losses['train']) % 10 == 0:
print('Epoch = {:>3} Batch = {:>4}/{:>4} ({:.3f}%) train_loss = {:.3f} train_accuracy = {:.3f} total_time = {:.3f} min'.format(
epoch_i, index + Batch_Size, Len_Train_Features, 100 * (index + Batch_Size) / Len_Train_Features, sum(losses['train'][-10:]) / 10,
sum([item[0] / item[1] for item in accuracies['train'][-10:]]) / 10,
(time.time() - t0) / 60))
index += Batch_Size
print('\n******************************************Test******************************************\n')
t0 = time.time()
index = 0
while index < Len_Test_Features:
update_RS(index, test_features, Len_Test_Features, test_target, mode='test')
if len(losses['test']) % 10 == 0:
print(
'Test Batch = {:>4}/{:>4} ({:.3f}%) test_loss = {:.3f} test_accuracy = {:.3f} whole_time = {:.3f} min'.format(
index + Batch_Size, Len_Test_Features, 100 * (index + Batch_Size) / Len_Test_Features,
sum(losses['test'][-10:]) / 10,
sum([item[0] / item[1] for item in accuracies['test'][-10:]]) / 10, (time.time() - t0) / 60))
index += Batch_Size
correct_num = sum([item[0] for item in accuracies['test']])
test_num = sum([item[1] for item in accuracies['test']])
print('Test Loss: {:.4f}'.format(sum(losses['test']) / test_num))
print('Test Correct Num: {}'.format(correct_num))
print('Test Num: {}'.format(test_num))
print('Test Accuracy: {:.4f}'.format(correct_num / test_num))
# Save model
save_model_name = './save_model/Fixed_DyEmbNum{}_LossType{}_TestAcc{:.4f}'.format(
Dy_Emb_Num, Loss_Type,
correct_num / test_num)
torch.save(model.state_dict(), save_model_name + '.pt')
print('Model saved to ' + save_model_name + '.pt')
feature_data = pd.concat([train_feature_data, test_feature_data])
print("feature_data: ", feature_data.shape[0], feature_data.shape[1])
feature_data['loss_{}'.format(Emb_Size)] = pd.DataFrame([[i] for i in train_sample_loss])
feature_data['acc_{}'.format(Emb_Size)] = pd.DataFrame([[i] for i in train_sample_accuracy])
if Model_Gpu:
print('\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
torch.cuda.empty_cache()
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
print('\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
feature_data.to_csv('./results/feature_data_with_loss_{}_Fixed_{}_{}.csv'.format(Dy_Emb_Num, Loss_Type, DATA_SET), index=None)
result_user, result_movie = [], []
for i in range(1, 100):
feature_data1 = feature_data[feature_data['user_frequency'] == i]
result_user.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result_user).to_csv('./results/result_{}_Fixed_{}_{}_user.csv'.format(Dy_Emb_Num, Loss_Type, DATA_SET), index=None,
header=Head)
for i in range(1, 100):
feature_data1 = feature_data[feature_data['movie_frequency'] == i]
result_movie.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result_movie).to_csv('./results/result_{}_Fixed_{}_{}_movie.csv'.format(Dy_Emb_Num, Loss_Type, DATA_SET), index=None,
header=Head)
result = []
for i in range(int(Train_Size / 1000000)):
feature_data1 = feature_data[i * 1000000:(i + 1) * 1000000]
result.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
| pd.DataFrame(result) | pandas.DataFrame |
# import libraries
import random
import time
import os
import re
import requests
from lxml import etree
import pandas as pd
import pymysql
import time
import re
from urllib.parse import urlparse,parse_qs,urlencode,urlunparse
import mysql
# export the document
def download_csv(data,name):
# get the collection timestamp
open_time = time.localtime(time.time())
open_time = time.strftime("%Y-%m-%d", open_time)
# get the file path
curt_path = os.path.abspath(__file__)
# create the two-level output folder
folderpath = os.path.dirname(curt_path) + "/" + "竞店数据源"
if not os.path.exists(folderpath):
os.mkdir(folderpath)
sub_folder = folderpath + "/" + "竞店" + open_time
if not os.path.exists(sub_folder):
os.mkdir(sub_folder)
# note: use os.path.abspath()!!! os.path.dirname() returns the path of the containing folder!!
filname = os.path.abspath(sub_folder) + "/" + name + "_" + open_time + ".csv"
if os.path.exists(filname):
os.remove(filname)
# export
head=['id','预售商品名称','预售商品链接','预售店铺名']
details= | pd.DataFrame(columns=head,data=data) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import logging
import logging.config
import yaml
import psycopg2
from faker.providers.person.en import Provider
from sqlalchemy import create_engine
from dotenv import load_dotenv, find_dotenv
load_dotenv()
if __name__ == "__main__":
username = os.environ.get("USERNAME")
password = os.environ.get("PASSWORD")
host = os.environ.get("HOST")
port = os.environ.get("PORT")
database = os.environ.get("DATABASE")
path_yaml = os.environ.get("PATH_YAML")
with open(path_yaml, 'r') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
table = "tb_user"
conn_string=f'postgresql://{username}:{password}@{host}:{port}/{database}'
def random_names(name_type, size):
"""
Generate n-length ndarray of person names.
name_type: a string, either first_names or last_names
"""
names = getattr(Provider, name_type)
return np.random.choice(names, size=size)
def random_genders(size, p=None):
"""Generate n-length ndarray of genders."""
if not p:
# default probabilities
p = (0.49, 0.49, 0.01, 0.01)
gender = ("M", "F", "O", "")
return np.random.choice(gender, size=size, p=p)
def random_dates(start, end, size):
"""
Generate random dates within range between start and end.
Adapted from: https://stackoverflow.com/a/50668285
"""
# Unix timestamp is in nanoseconds by default, so divide it by
# 24*60*60*10**9 to convert to days.
divide_by = 24 * 60 * 60 * 10**9
start_u = start.value // divide_by
end_u = end.value // divide_by
return pd.to_datetime(np.random.randint(start_u, end_u, size), unit="D")
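# Illustrative note (added): random_dates() draws uniformly among whole days in
# [start, end); e.g. random_dates(pd.to_datetime('1940-01-01'),
# pd.to_datetime('2008-01-01'), 3) might return a DatetimeIndex such as
# ['1971-05-02', '1989-11-23', '1953-07-14'] -- the dates are random and shown
# only as an assumed example.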
size = 100
df = pd.DataFrame(columns=['first', 'last', 'gender', 'birthdate'])
df['first'] = random_names('first_names', size)
df['last'] = random_names('last_names', size)
df['gender'] = random_genders(size)
df['birthdate'] = random_dates(start=pd.to_datetime('1940-01-01'), end= | pd.to_datetime('2008-01-01') | pandas.to_datetime |
# -*- coding: utf-8 -*-
""" About processing DataFrame
Author: Hunchbrown - <NAME>
Last Modified: 2020.07.20
About processing DataFrame
"""
import pandas as pd
def _get_unique_items(dataframe, name, list_type=True):
""" return unique items.
return unique items in dataframe.
Args:
dataframe (DataFrame) : dataframe to be checked
name (str) : column name
list_type (bool) : if list_type is true. need to unpack the item in list
Return:
unique_items (list) : list of unique items(tags/songs)
"""
unique_items = set()
if list_type:
for item in dataframe[name]:
unique_items |= set(item)
# sorting is done for tag and song
unique_items = sorted(unique_items)
else:
# assertion for playlist: guarantees that playlist has unique id in dataframe
assert len(dataframe[name].unique()) == len(dataframe[name])
# vertically aligned(train -> test)
unique_items = dataframe[name]
return unique_items
def to_dataframe(data):
""" to dataframe
convert json to dataframe
Args:
data (json) : source of json file
Return:
dataframe (DataFrame) : destination of json file in forms of pandas DataFrame
"""
items = {'id': [], 'plylst_title': [], 'tags': [], 'songs': [], 'like_cnt': [], 'updt_date': []}
for item in data:
items['id'].append(item['id'])
items['plylst_title'].append(item['plylst_title'])
items['tags'].append(item['tags'])
items['songs'].append(item['songs'])
items['like_cnt'].append(item['like_cnt'])
items['updt_date'].append(item['updt_date'])
dataframe = pd.DataFrame(items)
dataframe['updt_date'] = pd.to_datetime(dataframe.updt_date)
return dataframe
def get_item_idx_dictionary(train, test=None, mode='tags'):
""" Return dictionary
return item to index dictionary
Args:
train (DataFrame) : train dataframe
test (DataFrame) : test dataframe
mode (str) : mode determines which item to be converted. tags, songs, id possible
Return:
item2idx(dict): item to index dictionary
"""
assert mode in ['tags', 'songs', 'id']
if test is None:
items = _get_unique_items(train, mode, mode in ['tags', 'songs'])
else:
items = _get_unique_items(pd.concat([train, test], ignore_index=True), mode, mode in ['tags', 'songs'])
item2idx = {item:idx for idx, item in enumerate(items)}
return item2idx
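# Illustrative sketch (added; the toy frame is assumed, not from the original code):
# toy = pd.DataFrame({'id': [10, 11], 'tags': [['rock'], ['jazz', 'rock']], 'songs': [[1], [2, 3]]})
# get_item_idx_dictionary(toy, mode='tags')   # -> {'jazz': 0, 'rock': 1}
# get_item_idx_dictionary(toy, mode='songs')  # -> {1: 0, 2: 1, 3: 2}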
def map_title_to_playlist(train, test):
""" Return dictionary
return title to list of playlist dictionary
Args:
train (DataFrame) : train dataframe
test (DataFrame) : test dataframe
Return:
title2playlist(dict) : title to list of playlist dictionary
"""
title2playlist = dict()
df = | pd.concat([train, test], ignore_index=True) | pandas.concat |
import os
import docx2txt
import xlrd
import csv
import cv2
import pytesseract
from PIL import Image
from pkgutil import find_loader
import PyPDF2
import pdfplumber
from pptx import Presentation
from pdf2image import convert_from_path
import speech_recognition as sr
from pyostie.convert import *
from pyostie.insights_ext import *
pandas_installed = find_loader("pandas") is not None
if pandas_installed:
import pandas as pd
a = pd.DataFrame()
ocr_dict_output = []
class DOCXParser:
def __init__(self, filename, img_dir):
"""
Parameters
----------
filename : The file that needs to be processed.
"""
self.file = filename
self.img_dir = img_dir
def extract_docx(self):
"""
Returns
-------
DOCXParser for Docx files.
extract text and write images in img_dir
"""
output = docx2txt.process(self.file, self.img_dir)
return output
class XLSXParser:
def __init__(self, filename):
"""
Parameters
----------
filename : The file that needs to be processed.
"""
self.file = filename
def extract_xlsx(self):
"""
Returns
-------
XLSXParser for XLSX and XLS files.
"""
out_list = []
book = xlrd.open_workbook(self.file)
for val in range(len(book.sheet_names())):
sheet = book.sheet_by_index(val)
for res in range(sheet.nrows):
output = " " + " ".join(str(val_) for val_ in (sheet.row_values(res)))
out_list.append(output)
return out_list
class CSVParser:
def __init__(self, filename, delimiter):
"""
Parameters
----------
filename : The file that needs to be processed.
delimiter : By default ','. Can be changed if any other delimiter is needed.
"""
self.file = filename
self.delimiter = delimiter
def extract_csv(self):
"""
Returns
-------
CSVParser for csv files.
"""
with open(self.file) as file:
output = csv.reader(file, delimiter=self.delimiter)
return ' '.join([' '.join(row) for row in output])
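# Illustrative usage sketch (added; "records.csv" is an assumed filename):
# parser = CSVParser("records.csv", delimiter=",")
# text = parser.extract_csv()   # whole file flattened into one space-joined string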
class ImageParser:
def __init__(self, filename, tess_path=None):
"""
Parameters
----------
filename : The file that needs to be processed.
tess_path : The path to the tesseract cmd (Only for windows.)
"""
self.file = filename
self.path = tess_path
def extract_image(self):
"""
Returns
-------
ImageParser for Image formats.
"""
out_list = []
if self.path is not None:
pytesseract.pytesseract.tesseract_cmd = self.path
img = Image.open(self.file)
text = pytesseract.image_to_string(img)
out_list.append(text)
else:
img = Image.open(self.file)
text = pytesseract.image_to_string(img)
out_list.append(text)
return out_list
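# Illustrative usage sketch (added; the paths are assumed). On Windows, point
# pytesseract at the tesseract executable explicitly via tess_path:
# parser = ImageParser("scan.png", tess_path=r"C:\Program Files\Tesseract-OCR\tesseract.exe")
# text = parser.extract_image()[0]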
class PDFParser:
def __init__(self, filename, insights=False):
"""
Parameters
----------
filename : The file that needs to be processed.
insights : True by default. False if the dataframe is not needed.
"""
self.file = filename
self.insights = insights
def extract_pypdf2(self):
"""
Returns
-------
PDFParser for pdf files.
"""
contents = []
text = ' '
pdfFileObj = open(self.file, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pdfPages = pdfReader.getNumPages()
if pdfPages == 1:
for val in range(pdfReader.numPages):
pageObject = pdfReader.getPage(val)
text = text + pageObject.extractText()
contents.append(text)
if self.insights:
conv = conversion(self.file)
__conv = conv.convert()
insights = generate_insights(__conv, df)
__insights = insights.generate_df()
remove_files(__conv)
return __insights, contents
else:
return contents
if pdfPages >= 2:
pdf_multipage_df = | pd.DataFrame() | pandas.DataFrame |
#!python3
"""corn calculates value of OTC corn contract"""
import itertools
import matplotlib.pyplot as plt
import more_itertools
import numpy as np
import pandas
from pandas.tseries.offsets import CustomBusinessDay
# from datetime import datetime
from cal import USTradingCalendar
def generate_dts(basis_date, start_date, end_date):
"""gfenerate the dts given the start and end days."""
# skip non-business days / holidays
us_bd = CustomBusinessDay(calendar=USTradingCalendar())
date_range = | pandas.bdate_range(start_date, end_date, freq=us_bd) | pandas.bdate_range |
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positive
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include /in end')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import print_function
import collections
import os
import re
import sys
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', 'utils'))
sys.path.append(lib_path)
from data_utils import get_file
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
imputer = Imputer(strategy='mean', axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
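# Illustrative sketch (added; the toy frame is assumed): all-NaN columns are
# dropped, remaining NaNs are mean-imputed, then the requested scaler is applied.
# toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, np.nan, np.nan]})
# impute_and_scale(toy, scaling='minmax')   # -> single column 'a' scaled to [0.0, 0.5, 1.0]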
def describe_response_data(df, cells=['all'], drugs=['A'], doses=[-5, -4]):
if 'all' in cells or cells == 'all':
cells = all_cells()
if 'all' in drugs or drugs == 'all':
drugs = all_drugs()
elif len(drugs) == 1 and re.match("^[ABC]$", drugs[0].upper()):
drugs = drugs_in_set('Jason:' + drugs[0].upper())
print('cells:', cells)
print('drugs:', drugs)
lconc = -4
for cell in cells:
d = df[(df['CELLNAME'] == cell) & (df['LOG_CONCENTRATION'] == lconc)]
print(cell)
print(d.describe())
break
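# Note: describe_response_data breaks after the first matching cell line, so it prints a
# summary for a single cell only.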
def load_dose_response(min_logconc=-4., max_logconc=-4., subsample=None, fraction=False):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
min_logconc : -3, -4, -5, -6, -7, optional (default -4)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -4)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
fraction: bool (default False)
divide growth percentage by 100
"""
path = get_file(P1B3_URL + 'NCI60_dose_response_with_missing_z5_avg.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
dtype={'NSC':object, 'CELLNAME':str, 'LOG_CONCENTRATION':np.float32, 'GROWTH':np.float32})
global_cache[path] = df
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
if fraction:
df['GROWTH'] /= 100
df = df.set_index(['NSC'])
return df
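# Illustrative call of load_dose_response (parameter values are only an example):
#   df = load_dose_response(min_logconc=-5., max_logconc=-4.,
#                           subsample='naive_balancing', fraction=True)
# yields a CELLNAME/GROWTH/LOG_CONCENTRATION frame indexed by NSC drug id,
# with growth rescaled from percentages to fractions.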
def load_drug_descriptors(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (drugs descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'descriptors.2D-NSC.5dose.filtered.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''],
dtype=np.float32)
global_cache[path] = df
df1 = pd.DataFrame(df.loc[:,'NAME'].astype(int).astype(str))
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
df2 = df.drop('NAME', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:,usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
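# Illustrative call of load_drug_descriptors (ncols chosen arbitrarily here):
#   df = load_drug_descriptors(ncols=500, scaling='maxabs')
# keeps the NSC id column plus 500 randomly chosen 'dragon7.'-prefixed descriptor
# columns, imputed and scaled into [-1, 1].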
def load_cell_expression_u133p2(ncols=None, scaling='std', add_prefix=True):
"""Load U133_Plus2 cell line expression data prepared by Judith,
    sub-select columns of gene expression randomly if specified,
scale the selected data and return a pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file('http://bioseed.mcs.anl.gov/~fangfang/p1h/GSE32474_U133Plus2_GCRMA_gene_median.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c')
global_cache[path] = df
df1 = df['CELLNAME']
df2 = df.drop('CELLNAME', 1)
if add_prefix:
df2 = df2.add_prefix('expr.')
    total = df2.shape[1]  # count only expression columns (CELLNAME excluded)
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_expression_5platform(ncols=None, scaling='std', add_prefix=True):
"""Load 5-platform averaged cell line expression data, sub-select
    columns of gene expression randomly if specified, scale the
selected data and return a pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'RNA_5_Platform_Gene_Transcript_Averaged_intensities.transposed.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
global_cache[path] = df
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
if add_prefix:
df2 = df2.add_prefix('expr_5p.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_mirna(ncols=None, scaling='std', add_prefix=True):
"""Load cell line microRNA data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'RNA__microRNA_OSU_V3_chip_log2.transposed.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
global_cache[path] = df
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
if add_prefix:
df2 = df2.add_prefix('mRNA.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_proteome(ncols=None, scaling='std', add_prefix=True):
"""Load cell line microRNA data, sub-select columns randomly if
specificed, scale the selected data and return a pandas
dataframe.
Parameters
----------
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path1 = get_file(P1B3_URL + 'nci60_proteome_log2.transposed.tsv')
path2 = get_file(P1B3_URL + 'nci60_kinome_log2.transposed.tsv')
df = global_cache.get(path1)
if df is None:
df = | pd.read_csv(path1, sep='\t', engine='c') | pandas.read_csv |
from datetime import datetime
import pytest
from pandas import (
DatetimeIndex,
offsets,
to_datetime,
)
import pandas._testing as tm
from pandas.tseries.holiday import (
AbstractHolidayCalendar,
Holiday,
Timestamp,
USFederalHolidayCalendar,
USLaborDay,
USThanksgivingDay,
get_calendar,
)
@pytest.mark.parametrize(
"transform", [lambda x: x, lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)]
)
def test_calendar(transform):
start_date = datetime(2012, 1, 1)
end_date = datetime(2012, 12, 31)
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(transform(start_date), transform(end_date))
expected = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25),
]
assert list(holidays.to_pydatetime()) == expected
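    # Note: 2012-01-02 and 2012-11-12 are observed dates: New Year's Day and
    # Veterans Day both fell on a Sunday in 2012, so the federal holiday is
    # observed on the following Monday.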
def test_calendar_caching():
# see gh-9552.
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super().__init__(name=name, rules=rules)
jan1 = TestCalendar(rules=[Holiday("jan1", year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday("jan2", year=2015, month=1, day=2)])
# Getting holidays for Jan 1 should not alter results for Jan 2.
tm.assert_index_equal(jan1.holidays(), DatetimeIndex(["01-Jan-2015"]))
tm.assert_index_equal(jan2.holidays(), DatetimeIndex(["02-Jan-2015"]))
def test_calendar_observance_dates():
# see gh-11477
us_fed_cal = | get_calendar("USFederalHolidayCalendar") | pandas.tseries.holiday.get_calendar |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
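# For example (illustrative): get_upcast_box(pd.Index, pd.Series([1, 2])) returns Series,
# because a Series operand upcasts an Index box, while get_upcast_box(pd.DataFrame, ...)
# always stays DataFrame.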
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
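        # In short: NaT entries stay NaT under +, -, and under * and / with numeric
        # scalars, and multiplying or dividing by np.nan turns values into NaT as well.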
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
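        # Concrete scalar analogue (illustrative): Timedelta('2 days') / np.timedelta64(12, 'h')
        # gives 4.0, and np.timedelta64(12, 'h') / Timedelta('2 days') gives 0.25.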
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = | TimedeltaIndex(expected) | pandas.TimedeltaIndex |
#!/usr/bin/env python
# coding: utf-8
import xlrd
import numpy as np
from math import sqrt
import pandas as pd
import time
import datetime
import matplotlib.pyplot as plt
import math
import random as rd
import calendar
import torch
from torch import nn
from torch.autograd import Variable
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
import csv
# Global variables
record_path = '../data/record/'
weather_path = '../data/weather/'
park_all_cnt = 10
weather_all_cnt = 6
park_table_id = ['P1','P2','P3','P4','P5','P6','P7','P8','P9','P10']
park_weather_idx = [0,0,1,1,1,2,2,2,2,2]
weather_name = ['Ningbo','Ningbo Yinzhou','Changsha']
# util function
def read_park_table(index, debug = False):
park_table_path = record_path + park_table_id[index] + '.csv'
park_book = pd.read_csv(park_table_path,encoding='ISO-8859-1')##########
if debug:
        print('open table ' + park_table_id[index] + ' with lines ' + str(len(park_book)))
return park_book
def read_weather_table(index, debug = False):
weather_table_path = weather_path + str(index) + '.csv'
weather_book = pd.read_csv(weather_table_path,encoding='ISO-8859-1')
if debug:
        print('open table ' + weather_name[index] + ' with lines ' + str(len(weather_book)))
return weather_book
def trans_record_to_count(data, debug = False):
invalid_record = 0
valid_record = 0
p_dict = {}
for stime,etime in zip(data['Lockdown Time'],data['Lockup Time']):
start_tss = time.strptime(stime, "%Y/%m/%d %H:%M")##########
end_tss = time.strptime(etime, "%Y/%m/%d %H:%M")#########
# Converts start and end times to seconds
start_tsp = int(time.mktime(start_tss))
end_tsp = int(time.mktime(end_tss))
        # A parking record with a duration of less than 5 minutes is regarded as invalid
if end_tsp - start_tsp <= 5*60:
invalid_record = invalid_record + 1
continue
valid_record = valid_record + 1
start_hour = int(start_tsp//(60*60))
end_hour = int(end_tsp//(60*60))
# Calculate the parking numbers per hour
for j in range(start_hour,end_hour+1):
if j not in p_dict:
p_dict[j] = {}
p_dict[j]['cnt'] = 1
else:
p_dict[j]['cnt'] = p_dict[j]['cnt'] + 1
if debug:
print('valid record is ' + str(valid_record))
print('invalid record is ' + str(invalid_record))
return p_dict
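# Illustrative example (hypothetical record, not taken from the real data):
# a lockdown at 2017/1/1 10:20 with lockup at 2017/1/1 12:05 lasts more than
# 5 minutes, so it adds one count to each of the hour buckets for 10:xx, 11:xx and 12:xx.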
def calc_park_cnt_from_dict(p_dict, debug = False):
if debug:
print('calcing parking count from dict ...')
park_cnt = []
st = min(p_dict.keys())
ed = max(p_dict.keys())
for i in range(st,ed+1):
if i in p_dict:
park_cnt.append(p_dict[i]['cnt'])
else:
park_cnt.append(0)
return park_cnt
def process_weather(data, debug= False):
output = []
start_h = data['DAY'][0]
start_h = int(time.mktime(time.strptime(start_h,"%Y/%m/%d %H:%M")) // (60*60))############
output.append(start_h)
for i in range(5):
output.append([])
output.append({})
for i in range(len(data['HOUR'])):
output[1].append(data['TEM'][i])
output[2].append(data['RHU'][i])
output[3].append(data['WIN_S'][i])
output[4].append(data['PRE_1h'][i])
output[5].append(time.strptime(data['DAY'][i],"%Y/%m/%d %H:%M").tm_wday)##############
output[6][int(time.mktime(time.strptime(data['DAY'][i],"%Y/%m/%d %H:%M")) // (60*60))] = i############
return output
def invalid(w_list,idx):
if w_list[1][idx] > 999:
return True
if w_list[2][idx] > 999:
return True
if w_list[3][idx] > 999:
return True
if w_list[4][idx] > 999:
return True
return False
def gen_series(park_cnt, weather_rec, start_h, end_h, debug=False):
tt = []
for i in range(len(park_cnt)):
tt.append(start_h + i)
"""if debug:
print(tt[-1])"""
temp = []
for i in range(5):
temp.append([])
for i in range(len(park_cnt)):
if tt[i] in weather_rec[6]:
idx = weather_rec[6][tt[i]]
if invalid(weather_rec,idx):
continue
temp[0].append(park_cnt[i])
temp[1].append(weather_rec[1][idx])
temp[2].append(weather_rec[2][idx])
temp[3].append(weather_rec[3][idx])
temp[4].append(weather_rec[4][idx])
#if debug:
#print('The length of temp array is ' + str(len(temp[0])))
park_cnt = pd.Series(temp[0], name='cnt')
tem = pd.Series(temp[1], name='tem')
rhu = pd.Series(temp[2], name='rhu')
winds = pd.Series(temp[3], name='wind_s')
pre_1h = pd.Series(temp[4],name='pre_ih')
output = pd.concat([tem,rhu,winds,pre_1h,park_cnt], axis=1)
return output
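# Sketch of how the helpers above chain together (the indices used are assumptions):
#   park_book = read_park_table(0)
#   p_dict = trans_record_to_count(park_book)
#   park_cnt = calc_park_cnt_from_dict(p_dict)
#   weather_rec = process_weather(read_weather_table(park_weather_idx[0]))
#   series = gen_series(park_cnt, weather_rec, min(p_dict.keys()), max(p_dict.keys()))
# `series` is then a DataFrame with columns tem, rhu, wind_s, pre_ih and cnt.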
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 17 13:52:10 2018
@author: i
"""
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import datetime
from time import strftime
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.ticker import MaxNLocator
import matplotlib.offsetbox as offsetbox
# options for drop down menus in dialog window
hours_months = [1,2,3,4,5,6,7,8,9,10,11,12]
am_pm = ["am","pm"]
days = [1,2,3,4,5,6,7,8,9,10,
11,12,13,14,15,16,17,18,19,20,
21,22,23,24,25,26,27,28,29,30,31]
years = [2014,2015,2016,2017,2018,2019,2020,2021,2022,2023,2024]
hours24 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
# function to get night intervals and day intervals from a single dataframe
# returns two lists of start end tuples, one for nights and one for days
def get_intervals(mouse_df, lights_on, lights_out):
# get timestamps from mouse df
dates_range = mouse_df.index
#create distinct, consecutive dates
unique_dates = pd.date_range(mouse_df.index.min().date(),
mouse_df.index.max().date(),
freq='D')
# mouse day and night intervals
night_intervals = []
day_intervals = []
# create night intervals
# for each date in mouse, create start_hour-end_hour pair
# of night interval
for j in range(len(unique_dates)):
# start interval
start_night = datetime.datetime(unique_dates[j].year,
unique_dates[j].month,
unique_dates[j].day,
hour=lights_out,
minute=0,
second=0)
end_night_before = datetime.datetime(unique_dates[j].year,
unique_dates[j].month,
unique_dates[j].day,
hour=lights_on,
minute=0,
second=0)
        # make sure it is not the last interval
if (j+1) < len(unique_dates):
# end interval
end_night_next = datetime.datetime(unique_dates[j+1].year,
unique_dates[j+1].month,
unique_dates[j+1].day,
hour=lights_on,
minute=0,
second=0)
else: # if it is last interval
if start_night < dates_range[-1]:
night_intervals.append((start_night, dates_range[-1]))
break
if j == 0: # for the first interval
if end_night_before > dates_range[0]:
temp0 = dates_range[0]
temp1 = end_night_before
night_intervals.append((temp0,temp1))
                # the next night interval starts on the same date
temp0 = start_night
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
else:
temp0 = start_night
# if the next date is in the list,
# set it to the end of nighttime,
# if not set the end of plot to be the end of nighttime
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
else: # not the first day
temp0 = start_night
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
# invert night intervals to get days
for j in range(len(night_intervals)):
start_night, end_night = night_intervals[j]
# if it is the first interval
if j==0:
# if night starts later than the start of the timestamps
if start_night > dates_range[0]:
start_day = dates_range[0]
end_day = start_night
day_intervals.append((start_day,end_day))
else:
# check if this is not the only interval
if j+1 < len(night_intervals):
start_day = end_night
end_day = night_intervals[j+1][0]
day_intervals.append((start_day,end_day))
else: # if it was the only interval
if end_night < dates_range[-1]:
start_day = end_night
end_day = dates_range[-1]
day_intervals.append((start_day,end_day))
# check if it was the last interval
elif j+1 == len(night_intervals):
if len(day_intervals) > 1:
if end_night < dates_range[-1]:
start_day = end_night
end_day = dates_range[-1]
day_intervals.append((start_day,end_day))
else:
start_day = end_night
end_day = night_intervals[j+1][0]
day_intervals.append((start_day,end_day))
return night_intervals, day_intervals
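# Minimal illustration (hypothetical values, assuming lights_on=6 and lights_out=18):
# for a mouse dataframe indexed from 2018-03-08 12:00 to 2018-03-09 12:00,
# get_intervals returns night intervals covering roughly 18:00-06:00 and day
# intervals covering the remaining daylight hours, each as (start, end) tuples.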
class FedApp(Toplevel):
def __init__(self, parent, title = None):
Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.retrieved_id_ints = None
# instantiate window with options and make it in focus
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# instantaite buttons ok and cancel
self.buttonbox()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.initial_focus.focus_set()
self.wait_window(self)
def body(self, master):
# dialog body. return widget with options that should have
# initial focus.
Label(master, text="Select folder with csv files:").grid(row=0,
columnspan=4,
sticky=W,
padx=5,
pady=15)
Label(master, text="Lights on at:").grid(row=2, column=0, sticky=W, padx=5, pady=5)
Label(master, text="Lights out at:").grid(row=3, sticky=W, padx=5, pady=5)
Label(master, text="Select your dates and hours (mm/dd/yyyy h):").grid(row=4,
columnspan=3,
sticky=W,
padx=5,
pady=20)
Label(master, text="Month").grid(row=5, column=1, padx=5, pady=5)
Label(master, text="Day").grid(row=5, column=3, padx=5, pady=5)
Label(master, text="Year").grid(row=5, column=5, padx=5, pady=5)
Label(master, text="Hour").grid(row=5, column=6, padx=5, pady=5)
Label(master, text="From:").grid(row=6, column=0, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=6, column=2, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=6, column=4, sticky=W, padx=5, pady=5)
Label(master, text="Until:").grid(row=7, column=0, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=7, column=2, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=7, column=4, sticky=W, padx=5, pady=5)
Label(master, text="*Includes both above dates!", fg="red").grid(row=8, columnspan=3, sticky=W, padx=5, pady=5)
Label(master, text="Bin size for histograms:\n(in minutes)").grid(row=9, sticky=W, padx=5, pady=5)
self.folder_value = StringVar()
self.lights_on_default = IntVar(value=6)
self.lights_out_default = IntVar(value=6)
self.lights_on_am_pm_default = StringVar(value="am")
self.lights_out_am_pm_default = StringVar(value="pm")
self.month_from_default = IntVar(value=3)
self.day_from_default = IntVar(value=8)
self.year_from_default = IntVar(value=2018)
self.hour_from_default = IntVar(value=18)
self.month_until_default = IntVar(value=3)
self.day_until_default = IntVar(value=10)
self.year_until_default = IntVar(value=2018)
self.hour_until_default = IntVar(value=18)
self.bin_size_default = IntVar(value=60)
self.select_folder_btn = Button(master, text="Select", command=self.show_folders)
self.select_folder_btn.grid(row=1,column=5, padx=5, pady=5)
self.folder_path = Entry(master, textvariable=self.folder_value, width=60)
self.lights_on = OptionMenu(master, self.lights_on_default, *hours_months)
self.lights_on['bg'] = "#FFF994"
self.lights_on_am_pm = OptionMenu(master, self.lights_on_am_pm_default, *am_pm)
self.lights_out = OptionMenu(master, self.lights_out_default, *hours_months)
self.lights_out['bg'] ="#689CEB"
self.lights_out_am_pm = OptionMenu(master, self.lights_out_am_pm_default, *am_pm)
self.month_from = OptionMenu(master, self.month_from_default, *hours_months)
self.day_from = OptionMenu(master, self.day_from_default, *days)
self.year_from = OptionMenu(master, self.year_from_default, *years)
self.hour_from = OptionMenu(master, self.hour_from_default, *hours24)
self.month_until = OptionMenu(master, self.month_until_default, *hours_months)
self.day_until = OptionMenu(master, self.day_until_default, *days)
self.year_until = OptionMenu(master, self.year_until_default, *years)
self.hour_until = OptionMenu(master, self.hour_until_default, *hours24)
self.bin_size = Entry(master, textvariable=self.bin_size_default)
self.folder_path.grid(row=1, columnspan=5, sticky=W, padx=5, pady=15)
self.lights_on.grid(row=2, column=1, padx=5, pady=5)
self.lights_out.grid(row=3, column=1, padx=5, pady=5)
self.lights_on_am_pm.grid(row=2, column=2, columnspan=4, sticky=W, padx=5, pady=5)
self.lights_out_am_pm.grid(row=3, column=2, columnspan=4, sticky=W, padx=5, pady=5)
self.month_from.grid(row=6,column=1, padx=5, pady=5)
self.day_from.grid(row=6, column=3, padx=5, pady=5)
self.year_from.grid(row=6,column=5, padx=5, pady=5)
self.hour_from.grid(row=6, column=6, padx=5, pady=5)
self.month_until.grid(row=7,column=1, padx=5, pady=5)
self.day_until.grid(row=7, column=3, padx=5, pady=5)
self.year_until.grid(row=7,column=5, padx=5, pady=5)
self.hour_until.grid(row=7, column=6, padx=5, pady=5)
self.bin_size.grid(row=9, column=1, columnspan=2, sticky=W, padx=5, pady=5)
self.plot_checkbox = IntVar(value=1)
self.cb = Checkbutton(master,
text="Save Plots",
variable=self.plot_checkbox)
self.cb.grid(row=11, column=0, columnspan=3, sticky=E, padx=5, pady=5)
self.data_checkbox = IntVar(value=1)
self.cb = Checkbutton(master,
text="Save Data",
variable=self.data_checkbox)
self.cb.grid(row=11, column=3, columnspan=3, sticky=W, padx=5, pady=5)
def buttonbox(self):
# add standard button box (ok and cancel)
box = Frame(self)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=15)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=15)
# same commands with keyboard (enter==ok, esc==cancel)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
#########################################
# This is where all the magic is called #
#########################################
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
# retrieve user input
self.get_input()
# close options window
self.cancel()
# execute main functionality of the script
self.main_function()
        print()
print("\nDone")
try:
self.parent.destroy()
except:
return
#########################################
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
def validate(self):
# validate if path was given
# if day and night last 12 hours
# if bin size is integer no larger than 12 hours
# if given dates are chronological
# check if day duration of day and night is at least an hour
if self.lights_on_default.get() == 12:
if self.lights_on_am_pm_default.get()=="am":
self.my_lights_on = 0
elif self.lights_on_am_pm_default.get()=="pm":
self.my_lights_on = self.lights_on_default.get()
elif self.lights_on_am_pm_default.get()=="am":
self.my_lights_on = self.lights_on_default.get()
elif self.lights_on_am_pm_default.get()=="pm":
self.my_lights_on = self.lights_on_default.get()+12
if self.lights_out_default.get() == 12:
if self.lights_out_am_pm_default.get()=="am":
self.my_lights_out = 0
elif self.lights_out_am_pm_default.get()=="pm":
self.my_lights_out = self.lights_out_default.get()
elif self.lights_out_am_pm_default.get()=="am":
self.my_lights_out = self.lights_out_default.get()
elif self.lights_out_am_pm_default.get()=="pm":
self.my_lights_out = self.lights_out_default.get()+12
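        # e.g. lights on at 6 am -> 6 and lights out at 6 pm -> 18, i.e. a 12-hour photoperiod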
if abs(self.my_lights_on - self.my_lights_out) != 12:
messagebox.showwarning(
"Warning!",
"Day and Night should last 12 hours each!"
)
return 0
try:
# check in path was provided
if len(self.folder_path.get()) > 0:
# test if bin is integer
int(self.bin_size.get())
else:
messagebox.showwarning(
"Warning!",
"Remember to select the path.\nBin has to be an integer.\n\nPlease try again."
)
return 0
# check range of bin size (no bigger than 12 hours)
if int(self.bin_size.get()) <= 0 or int(self.bin_size.get()) > 720:
messagebox.showwarning(
"Warning!",
"Bin size has to be smaller than 12 hours (720 minutes)!"
)
return 0
# check if from date is earlier than until date
date_from_date = datetime.datetime(self.year_from_default.get(),
self.month_from_default.get(),
self.day_from_default.get(),
hour=self.hour_from_default.get(),
minute=0,second=0)
date_until_date = datetime.datetime(self.year_until_default.get(),
self.month_until_default.get(),
self.day_until_default.get(),
hour=self.hour_until_default.get(),
minute=0,second=0)
if date_from_date < date_until_date:
return 1
else:
messagebox.showwarning(
"Warning!",
"From date has to be before Until date!"
)
return 0
except ValueError:
messagebox.showwarning(
"Warning!",
"Remember to select the path.\nBin has to be an integer.\n\nPlease try again."
)
return 0
def get_input(self):
# executed after clicking on ok button
self.main_folder_path = self.folder_path.get()
date_from_str = (str(self.year_from_default.get()) + "-" +
str(self.month_from_default.get()) + "-" +
str(self.day_from_default.get()) + " " +
str(self.hour_from_default.get()) + ":00:00")
date_until_str = (str(self.year_until_default.get()) + "-" +
str(self.month_until_default.get()) + "-" +
str(self.day_until_default.get()) + " " +
str(self.hour_until_default.get()) + ":00:00")
self.my_start_date = date_from_str
self.my_end_date = date_until_str
self.my_bin_size = self.bin_size.get()
self.to_plot = False if self.plot_checkbox.get()==0 else True
self.to_save_data = False if self.data_checkbox.get()==0 else True
def show_folders(self):
# executed when select button in dialog box is clicked
# select folder from explorer window
self.src = filedialog.askdirectory()
self.folder_value.set(self.src)
def select_mice(self):
# create a list of available mice ids
self.mice_ids_str_values = ""
for i in range(len(self.mice_ids_list)):
if i+1 == len(self.mice_ids_list): # if the last one
self.mice_ids_str_values = self.mice_ids_str_values + str(self.mice_ids_list[i])
else:
self.mice_ids_str_values = self.mice_ids_str_values + str(self.mice_ids_list[i]) + ","
# create option window
self.option_window = Tk()
self.option_window.title('Mice Selection')
Label(self.option_window, text="Select your mice from the list of available mice:").grid(row=0, column=0, sticky=W, padx=5, pady=5)
Label(self.option_window, text=self.mice_ids_str_values).grid(row=1, column=0, padx=5, pady=5)
self.mice_selection = Entry(self.option_window, textvariable="")
# clear entry just in case, and set the text to mice ids from files
self.mice_selection.delete(0, END)
self.mice_selection.insert(0, self.mice_ids_str_values)
self.mice_selection.grid(row=2, column=0, padx=5, pady=5)
Label(self.option_window, text="*List of coma separated integer ids! No spaces!", fg="red").grid(row=3, column=0, sticky=W, padx=5, pady=5)
b = Button(self.option_window, text='Ok', command=self.get_mice_choice)
b.grid(row=4, column=0, sticky='nsew', padx=20, pady=5)
self.option_window.initial_focus = self.option_window
self.option_window.wait_window(self.option_window)
def get_mice_choice(self):
try:
            # remove leading and trailing whitespace and commas, then split on commas
retrieved_id_strings = self.mice_selection.get().strip().strip(',').split(',')
# check if all options are integers
self.retrieved_id_ints = [int(el) for el in retrieved_id_strings]
# check if all options were available in file
for el in self.retrieved_id_ints:
if str(el) not in self.mice_ids_str_values.split(','):
messagebox.showwarning(
"Warning!",
"Some of the given ids might not be available in the files."
)
self.option_window.destroy()
# reset ids to none
self.retrieved_id_ints = None
return
except:
messagebox.showwarning(
"Warning!",
"List of coma separated integer ids!\nNo spaces!\n"
)
self.option_window.destroy()
# reset ids to none
self.retrieved_id_ints = None
return
self.option_window.destroy()
#########################################################################
# My sequence of actions (reading, binning, plotting)
def main_function(self):
csv_read = self.read_csv_files()
# if the csv_read function's checks failed
if csv_read == 0:
return 0
# read which mice to include
self.select_mice()
# if selected mice were not correctly validated, end here
if self.retrieved_id_ints is None:
print("Failed to select mice")
return
# create a new list of dataframes only with the selected mice
self.include_selected_mice()
# retrieve totals for each day, each day/night
# and interval times between pellet intakes
self.get_data()
# create path to subfolder for results
# MonthDay_HourMinute + the above ending + .xls
current_time = strftime("%m%d_%H%M_%S")+"s"
subfolder_name = "Results"+current_time
self.subfolder_path = os.path.join(self.main_folder_path, subfolder_name)
# if folder not yet created, create one
if (not os.path.exists(self.subfolder_path)):
os.mkdir(self.subfolder_path)
        # plot binned pellets and motorturns and intervals (one plot per mouse)
self.plot_pellets_and_motorturns()
if (self.to_plot):
# plot histograms
self.plot_histograms()
self.plot_kcal()
if (self.to_save_data):
# save day data and day/night data
self.save_data()
#############################################################################
#####################################################
# FUNCTION TO READ AND SORT EACH MOUSE DATA,
# AND TO GET ALL MICE IDS
def read_csv_files(self):
# reads csv files and organizes them into dataframes
all_dataframes = []
# for all files in folder
for file in os.listdir(self.main_folder_path):
if file.endswith(".CSV"):
if file.startswith("FED"):
# read that file into a dataframe
file_path = os.path.join(self.main_folder_path ,file)
df = pd.read_csv(file_path)
all_dataframes.append(df)
##################################################
# create a single dataframe from all files
self.main_df = pd.concat(all_dataframes)
print(self.main_df)
##################################################
# create separate dataframe for each mouse
# (all original columns)
by_mouse_df_list = []
# find unique mouse indexes
mice_indexes = pd.unique(self.main_df[' Mouse'])
# split main dataframe into single dataframe per mouse
for index in mice_indexes:
single_mouse_df = self.main_df[self.main_df[' Mouse']==index]
by_mouse_df_list.append(single_mouse_df)
########################################################################
# list of dataframes by mouse (only given dates)
# (only sorted timestamps, mouse index, pellet count, motorturn count)
self.mouse_df_list = []
########################################################################
# make sure all dates are sorted:
for i in range(len(by_mouse_df_list)):
# count how many rows are there
# that is equal to the total pellet count
total_pellet_count = by_mouse_df_list[i].shape[0]
# create consecutive pellet count values
total_pellet_count_list = [i+1 for i in range(total_pellet_count)]
# convert dates to pandas datetime
ts_list = pd.to_datetime(by_mouse_df_list[i]['MM:DD:YYYY hh:mm:ss']).tolist()
# create new dataframe
new_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :ts_list,
"Mouse" : by_mouse_df_list[i][' Mouse'].tolist(),
"PelletCount" : total_pellet_count_list,
"MotorTurns" : by_mouse_df_list[i][' MotorTurns'].tolist()})
# make timestamps indexes
new_df.index = new_df['MM:DD:YYYY hh:mm:ss']
# remove old column
del new_df['MM:DD:YYYY hh:mm:ss']
# sort dates
new_df = new_df.sort_index()
# select only user defined timeframe
# https://pandas.pydata.org/pandas-docs/stable/timeseries.html
new_df = new_df[self.my_start_date:self.my_end_date]
# replace pellet count with new consecutive pellet count for that dates
new_df['PelletCount'] = [i+1 for i in range(new_df.shape[0])]
if new_df.shape[0] != 0:
self.mouse_df_list.append(new_df)
else:
# if for a mouse, there is no data within given dates
# my_start_year,my_start_month,my_start_day = self.my_start_date.split('-')
# my_end_year,my_end_month,my_end_day = self.my_end_date.split('-')
# create dataframe with all zero values
start = datetime.datetime.strptime(self.my_start_date, "%Y-%m-%d %H:%M:%S")
end = datetime.datetime.strptime(self.my_end_date, "%Y-%m-%d %H:%M:%S")
new_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [by_mouse_df_list[i][' Mouse'].iloc[0], by_mouse_df_list[i][' Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
new_df.index = new_df['MM:DD:YYYY hh:mm:ss']
del new_df['MM:DD:YYYY hh:mm:ss']
new_df = new_df.sort_index()
self.mouse_df_list.append(new_df)
# check if there was any data
if len(self.mouse_df_list) == 0:
messagebox.showwarning(
"Warning!",
"No data for given dates!"
)
return 0
# get all mice ids from dataframes
self.mice_ids_list = []
for i in range(len(self.mouse_df_list)):
mouse_id = self.mouse_df_list[i]['Mouse'].iloc[0]
if mouse_id not in self.mice_ids_list:
self.mice_ids_list.append(mouse_id)
return 1
def include_selected_mice(self):
included_mice_df = []
for i in range(len(self.mouse_df_list)):
# get mouse id from the dataframe
mouse_id = self.mouse_df_list[i]['Mouse'].iloc[0]
# check if that is was selected by user
if mouse_id in self.retrieved_id_ints:
included_mice_df.append(self.mouse_df_list[i])
# make new list of dataframes only with selected mice a main source of data
self.mouse_df_list = included_mice_df
##################################################################
# FUNCTION TO GET DAY AND NIGHT INTERVALS FOR EACH MOUSE
# TO CALCULATE TOTAL PELLET INTAKE BY 24HRS AND AVERAGE,
# TOTAL PELLET INTAKE DURING DAYTIMES AND NIGHTTIMES AND AVERAGE
# INTERVALS BETWEEN PELLET INTAKES
def get_data(self):
################################################################
# day and night intervals by mouse
self.night_mouse_intervals = []
self.day_mouse_intervals = []
######################################################################
# for each mouse get all night intervals and all day intervals
for i in range(len(self.mouse_df_list)):
# single mouse
night_intervals, day_intervals = get_intervals(self.mouse_df_list[i],
self.my_lights_on,
self.my_lights_out)
# add to the list of intervals for all mice
self.night_mouse_intervals.append(night_intervals)
self.day_mouse_intervals.append(day_intervals)
######## end creating all day intervals for that mouse
######## (self.day_mouse_intervals)
# find first date of all and last date from all
starts = []
ends = []
# for all mice find beginning and end of data
for df in self.mouse_df_list:
# find first date for that mouse and the last date
starts.append(df.index.min())
ends.append(df.index.max())
# find the earliest date from all mice and the lates date from all mice
earliest_of_all = min(starts)
latest_of_all = max(ends)
# print(earliest_of_all, latest_of_all)
# create the list of start times for all available 24 hour periods
# first, find whether the earliest common date is closer to start day or start night
only_date_earliest = earliest_of_all.date()
that_day = pd.Timestamp(year=only_date_earliest.year,
month=only_date_earliest.month,
day=only_date_earliest.day,
hour=int(self.my_lights_on))
that_night = pd.Timestamp(year=only_date_earliest.year,
month=only_date_earliest.month,
day=only_date_earliest.day,
hour=int(self.my_lights_out))
# decide whether to start from the lights out or lights on hour
if abs(that_day-earliest_of_all) < abs(that_night-earliest_of_all):
my_earliest = that_day
else:
my_earliest = that_night
all_24hrs_dates = pd.date_range(my_earliest,
latest_of_all,
freq='D')
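        # e.g. if the earliest record is near 06:00 on 2018-03-08 and lights_on=6,
        # the 24 h periods run 03-08 06:00 -> 03-09 06:00 -> ... (illustrative values only)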
# create a dataframe for each mouse
# that contains data from common start and common end
mouse_per24hrs_full_dfs = []
all_mouse_day_intervals = []
        all_mouse_night_intervals = []
for df in self.mouse_df_list:
new_df = df[earliest_of_all:latest_of_all]
mouse_per24hrs_full_dfs.append(new_df)
# get all night and day intervals for that mouse
night_intervals, day_intervals = get_intervals(new_df,
self.my_lights_on,
self.my_lights_out)
all_mouse_day_intervals.append(day_intervals)
            all_mouse_night_intervals.append(night_intervals)
# for each mouse create list of dataframes that contain timestamps
# for each 24 hour period
mouse_24hrs_dfs = []
for i in range(len(mouse_per24hrs_full_dfs)):
# list of tuples,
# first element is a pair of start-end period,
# second element is dataframe, one df per each 24 hour period
mouse_dfs = []
for j in range(len(all_24hrs_dates)):
# if it is the first beginning
if j == 0:
# check if this is the only beginning
if len(all_24hrs_dates) == 1:
start = all_24hrs_dates[j]
# ends on last available time
end = mouse_per24hrs_full_dfs[i].index.max()
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
else: # this was not the only beginning (not the last)
start = all_24hrs_dates[j]
end = all_24hrs_dates[j+1]
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
# check if it was the last beginning
elif (j+1) == len(all_24hrs_dates):
start = all_24hrs_dates[j]
end = mouse_per24hrs_full_dfs[i].index.max()
                # check if the start date is earlier than the end of data
if start < end:
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
else: # not the first and not the last beginning
start = all_24hrs_dates[j]
end = all_24hrs_dates[j+1]
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
mouse_24hrs_dfs.append(mouse_dfs)
# print(mouse_dfs)
# create dataframes for the csv files
# for all mice, create dataframe with start dates as indexes
# column names as mice ids with sums from that 24 hours in each row
# last column with means
# divide each day into sums from daytime and nighttime
mice_by_24hrs_cumm_dfs = []
mice_by24h_day_night_dfs = []
for i in range(len(mouse_24hrs_dfs)): # for each mouse
my_sums = []
my_day_sums = []
my_night_sums = []
# print("\nMouse", i+1)
for j in range(len(mouse_24hrs_dfs[i])): # for each day
# get a list of either day or night according to hours from timestamp
day_night_list = []
for row in mouse_24hrs_dfs[i][j][1].itertuples():
if row[0].hour >= self.my_lights_on and row[0].hour < self.my_lights_out:
day_night_list.append('day')
else:
day_night_list.append('night')
# create new dataframe with a column for days and nights
# first element of tuple is pair of dates
# second element is dataframe
day_night_df = mouse_24hrs_dfs[i][j][1].copy()
day_night_df['DayNight'] = day_night_list
# (if sum=0, there were no data)
if day_night_df[day_night_df.DayNight == 'day']['PelletCount'].sum() == 0:
my_day_sum = np.nan
else:
my_day_sum = day_night_df[day_night_df.DayNight == 'day'].shape[0]
if day_night_df[day_night_df.DayNight == 'night']['PelletCount'].sum() == 0:
my_night_sum = np.nan
else:
my_night_sum = day_night_df[day_night_df.DayNight == 'night'].shape[0]
# second element is dataframe
# first sum pellets (if sum=0, there were no data)
if mouse_24hrs_dfs[i][j][1]['PelletCount'].sum() == 0:
my_sum = np.nan
else: # number of rows of data is the sum of all pellets
my_sum = mouse_24hrs_dfs[i][j][1].shape[0]
mouse_name = "Mouse " + str(mouse_24hrs_dfs[i][j][1]['Mouse'].iloc[0])
mouse_name_day = mouse_name + "_Day"
mouse_name_night = mouse_name + "_Night"
my_sums.append(my_sum)
my_day_sums.append(my_day_sum)
my_night_sums.append(my_night_sum)
df = pd.DataFrame({mouse_name:my_sums})
day_night_df = pd.DataFrame({mouse_name_day:my_day_sums,
mouse_name_night:my_night_sums})
mice_by_24hrs_cumm_dfs.append(df)
mice_by24h_day_night_dfs.append(day_night_df)
# join all single mice data into one, common dataframe
self.mice_by_24hrs_df = pd.concat(mice_by_24hrs_cumm_dfs, axis=1)
# join all day/night dataframes into a single dataframe
self.mice_by_daynight_df = | pd.concat(mice_by24h_day_night_dfs, axis=1) | pandas.concat |
import torch
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.metrics import mean_squared_error, mean_absolute_error
from torchvision import utils
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold'})
from .optimization import *
def plot_pokemon(
x, y, y_hat=None, x_range=[10, 130], y_range=[10, 130], dx=20, dy=20
):
fig = go.Figure()
fig.add_trace(
go.Scatter(x=x, y=y, mode="markers", marker=dict(size=10), name="data")
)
if y_hat is not None:
fig.add_trace(
go.Scatter(
x=x,
y=y_hat,
line_color="red",
mode="lines",
line=dict(width=3),
name="Fitted line",
)
)
width = 550
title_x = 0.46
else:
width = 500
title_x = 0.5
fig.update_layout(
width=width,
height=500,
title="Pokemon stats",
title_x=title_x,
title_y=0.93,
xaxis_title="defense",
yaxis_title="attack",
margin=dict(t=60),
)
fig.update_xaxes(range=x_range, tick0=x_range[0], dtick=dx)
fig.update_yaxes(range=y_range, tick0=y_range[0], dtick=dy)
return fig
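# Example call (illustrative; `defense` and `attack` are assumed pandas Series):
#   fig = plot_pokemon(defense, attack, y_hat=0.9 * defense)
#   fig.show()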
def plot_logistic(
x,
y,
y_hat=None,
threshold=None,
x_range=[-3, 3],
y_range=[-0.25, 1.25],
dx=1,
dy=0.25,
):
fig = go.Figure()
fig.update_xaxes(range=x_range, tick0=x_range[0], dtick=dx)
fig.update_yaxes(range=y_range, tick0=y_range[0], dtick=dy)
if threshold is not None:
threshold_ind = (np.abs(y_hat - threshold)).argmin()
fig.add_trace(
go.Scatter(
x=[x_range[0], x_range[0], x[threshold_ind], x[threshold_ind]],
y=[y_range[0], y_range[1], y_range[1], y_range[0]],
mode="lines",
fill="toself",
fillcolor="limegreen",
opacity=0.2,
line=dict(width=0),
name="0 prediction",
)
)
fig.add_trace(
go.Scatter(
x=[x[threshold_ind], x[threshold_ind], x_range[1], x_range[1]],
y=[y_range[0], y_range[1], y_range[1], y_range[0]],
mode="lines",
fill="toself",
fillcolor="lightsalmon",
opacity=0.3,
line=dict(width=0),
name="1 prediction",
)
)
fig.add_trace(
go.Scatter(
x=x,
y=y,
mode="markers",
marker=dict(
size=10,
color="#636EFA",
line=dict(width=1, color="DarkSlateGrey"),
),
name="data",
)
)
if y_hat is not None:
fig.add_trace(
go.Scatter(
x=x,
y=y_hat,
line_color="red",
mode="lines",
line=dict(width=3),
name="Fitted line",
)
)
width = 650
title_x = 0.46
else:
width = 600
title_x = 0.5
if threshold is not None:
fig.add_trace(
go.Scatter(
x=[x[threshold_ind]],
y=[threshold],
mode="markers",
marker=dict(
size=18,
color="gold",
line=dict(width=1, color="DarkSlateGrey"),
),
name="Threshold",
)
)
fig.update_layout(
width=width,
height=500,
title="Pokemon stats",
title_x=title_x,
title_y=0.93,
xaxis_title="defense",
yaxis_title="legendary",
margin=dict(t=60),
)
return fig
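# Example call (illustrative; x, y are assumed arrays and y_hat their sigmoid outputs):
#   fig = plot_logistic(x, y, y_hat, threshold=0.5)
#   fig.show()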
def plot_gradient_m(x, y, m, slopes, mse, grad_func):
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=slopes,
y=mse,
line_color="#1ac584",
line=dict(width=3),
mode="lines",
name="MSE",
)
)
fig.add_trace(
go.Scatter(
x=slopes,
y=mean_squared_error(y, m * x) + grad_func(x, y, m) * (slopes - m),
line_color="red",
mode="lines",
line=dict(width=2),
name="gradient",
)
)
fig.add_trace(
go.Scatter(
x=[m],
y=[mean_squared_error(y, m * x)],
line_color="red",
marker=dict(size=14, line=dict(width=1, color="DarkSlateGrey")),
mode="markers",
name=f"slope {m}",
)
)
fig.update_layout(
width=520,
height=450,
xaxis_title="slope (w)",
yaxis_title="MSE",
title=f"slope {m:.1f}, gradient {grad_func(x, y, m):.1f}",
title_x=0.46,
title_y=0.93,
margin=dict(t=60),
)
fig.update_xaxes(range=[0.4, 1.6], tick0=0.4, dtick=0.2)
fig.update_yaxes(range=[0, 2500])
return fig
def plot_grid_search(
x,
y,
slopes,
loss_function,
title="Mean Squared Error",
y_range=[0, 2500],
y_title="MSE",
):
mse = []
df = pd.DataFrame()
for m in slopes:
df[f"{m:.2f}"] = m * x # store predictions for plotting later
mse.append(loss_function(y, m * x)) # calc MSE
mse = pd.DataFrame({"slope": slopes, "squared_error": mse})
fig = make_subplots(
rows=1, cols=2, subplot_titles=("Data & Fitted Line", title)
)
fig.add_trace(
go.Scatter(x=x, y=y, mode="markers", marker=dict(size=10), name="Data"),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=x,
y=df.iloc[:, 0],
line_color="red",
mode="lines",
line=dict(width=3),
name="Fitted line",
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=mse["slope"],
y=mse["squared_error"],
mode="markers",
marker=dict(size=7),
name="MSE",
),
row=1,
col=2,
)
fig.add_trace(
go.Scatter(
x=mse.iloc[[0]]["slope"],
y=mse.iloc[[0]]["squared_error"],
line_color="red",
mode="markers",
marker=dict(size=14, line=dict(width=1, color="DarkSlateGrey")),
name="MSE for line",
),
row=1,
col=2,
)
fig.update_layout(width=900, height=475)
fig.update_xaxes(
range=[10, 130],
tick0=10,
dtick=20,
row=1,
col=1,
title="defense",
title_standoff=0,
)
fig.update_xaxes(
range=[0.3, 1.6],
tick0=0.3,
dtick=0.2,
row=1,
col=2,
title="slope",
title_standoff=0,
)
fig.update_yaxes(
range=[10, 130],
tick0=10,
dtick=20,
row=1,
col=1,
title="attack",
title_standoff=0,
)
fig.update_yaxes(
range=y_range, row=1, col=2, title=y_title, title_standoff=0
)
frames = [
dict(
name=f"{slope:.2f}",
data=[
go.Scatter(x=x, y=y),
go.Scatter(x=x, y=df[f"{slope:.2f}"]),
go.Scatter(x=mse["slope"], y=mse["squared_error"]),
go.Scatter(
x=mse.iloc[[n]]["slope"], y=mse.iloc[[n]]["squared_error"]
),
],
traces=[0, 1, 2, 3],
)
for n, slope in enumerate(slopes)
]
sliders = [
{
"currentvalue": {
"font": {"size": 16},
"prefix": "slope: ",
"visible": True,
},
"pad": {"b": 10, "t": 30},
"steps": [
{
"args": [
[f"{slope:.2f}"],
{
"frame": {
"duration": 0,
"easing": "linear",
"redraw": False,
},
"transition": {"duration": 0, "easing": "linear"},
},
],
"label": f"{slope:.2f}",
"method": "animate",
}
for slope in slopes
],
}
]
fig.update(frames=frames), fig.update_layout(sliders=sliders)
return fig
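# Example call (illustrative; the slope grid is an assumption):
#   fig = plot_grid_search(x, y, np.arange(0.4, 1.65, 0.05), mean_squared_error)
#   fig.show()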
def plot_grid_search_2d(x, y, slopes, intercepts):
mse = np.zeros((len(slopes), len(intercepts)))
for i, slope in enumerate(slopes):
for j, intercept in enumerate(intercepts):
mse[i, j] = mean_squared_error(y, x * slope + intercept)
fig = make_subplots(
rows=1,
cols=2,
subplot_titles=("Surface Plot", "Contour Plot"),
specs=[[{"type": "surface"}, {"type": "contour"}]],
)
fig.add_trace(
go.Surface(
z=mse, x=intercepts, y=slopes, name="", colorscale="viridis"
),
row=1,
col=1,
)
fig.add_trace(
go.Contour(
z=mse,
x=intercepts,
y=slopes,
name="",
showscale=False,
colorscale="viridis",
),
row=1,
col=2,
)
fig.update_layout(
scene=dict(
zaxis=dict(title="MSE"),
yaxis=dict(title="slope (w<sub>1</sub>)"),
xaxis=dict(title="intercept (w<sub>0</sub>)"),
),
scene_camera=dict(eye=dict(x=2, y=1.1, z=1.2)),
margin=dict(l=0, r=0, b=60, t=90),
)
fig.update_xaxes(
title="intercept (w<sub>0</sub>)",
range=[intercepts.max(), intercepts.min()],
tick0=intercepts.max(),
row=1,
col=2,
title_standoff=0,
)
fig.update_yaxes(
title="slope (w<sub>1</sub>)",
range=[slopes.min(), slopes.max()],
tick0=slopes.min(),
row=1,
col=2,
title_standoff=0,
)
fig.update_layout(width=900, height=475, margin=dict(t=60))
return fig
def plot_gradient_descent(x, y, w, alpha, tolerance=2e-4, max_iterations=5000):
if x.ndim == 1:
x = np.array(x).reshape(-1, 1)
slopes, losses = gradient_descent(
x, y, [w], alpha, tolerance, max_iterations, history=True
)
slopes = [_[0] for _ in slopes]
x = x.flatten()
mse = []
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
# By:<NAME>
# Create:2019-12-23
# Update:2021-10-20
# For: Scraping data from Weibo and a simple, not-so-rigorous sentiment analysis based on a sentiment dictionary
import requests
import re
import os
import time
import random
from lxml import etree
from datetime import datetime, timedelta
import pandas as pd
from urllib.request import quote, unquote
from fp.fp import FreeProxy
class ScrapePosts:
def __init__(self,kw=None,cookies=None,headers=None,use_prox=True,st=None,et=None,sort="hot",cr_url=True):
self.cookies = cookies
self.headers = headers
if use_prox:
self.new_proxy()
else:
self.proxies = None
self.keyword = quote(kw, encoding='utf-8') if kw is not None else None
self.starttime = datetime.strptime(st, '%Y/%m/%d') if st is not None else None
self.endtime = datetime.strptime(et, '%Y/%m/%d') if et is not None else None
self.sort = sort
self.url = self.get_url() if cr_url else None
def new_proxy(self, rand = True):
self.proxies = FreeProxy(rand=rand).get()
def change_endtime(self,date):
self.endtime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_starttime(self,date):
self.starttime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_kw(self,kw):
self.keyword = quote(kw, encoding='utf-8')
self.url = self.get_url()
def change_sort(self,sort):
self.sort = sort
self.url = self.get_url()
def get_filter(self):
self.keyword = input("Please input keyword:")
self.endtime = input("Please input end time(yyyy/mm/dd):")
self.starttime = input("Please input start time(yyyy/mm/dd):")
self.sort = input("Please choose sorting method(time/hot):")
# Sometimes it's ok to just put Chinese words into the url, but it will be better to encode with URL encoding
self.keyword = quote(self.keyword, encoding='utf-8')
self.starttime = datetime.strptime(self.starttime, '%Y/%m/%d')
self.endtime = datetime.strptime(self.endtime, '%Y/%m/%d')
# get the url, note that we need to paste the page= to the url
# and the function returns a list of urls, each of which searches for the posts within one day
def get_url(self):
# default start time is Jan-01, 2010, default sort method is by time(could be by 'hot')
search_url = 'https://weibo.cn/search/mblog?hideSearchFrame='
delta = self.endtime - self.starttime + timedelta(days=1)
url = [None] * delta.days
i = 0
while i < delta.days:
url[i] = search_url + "&keyword=" + self.keyword + "&advancedfilter=1" + "&starttime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&endtime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&sort=" + self.sort
i += 1
return url
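    # Illustrative output (keyword and dates are assumptions): each element looks like
    # https://weibo.cn/search/mblog?hideSearchFrame=&keyword=%E7%96%AB%E6%83%85&advancedfilter=1
    #   &starttime=20200101&endtime=20200101&sort=hot
    # and "&page=N" still has to be appended before passing it to get_html.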
# create a tiny function to create name
def save_html(self, url, html):
ed = re.findall(r'endtime=(.*?)&', url)[0]
pg = re.findall(r'page=(.*)', url)[0]
name = '_'.join([unquote(self.keyword), ed, pg])
save = open('.//html/%s.txt' % name, "w", encoding="utf-8")
save.write('%s' % html)
save.close()
# note that if you generate the url from geturl function, you will need to add the "&page=" to the url
def get_html(self, url, save_html=True, use_prox=True):
        # build the request headers; you will need a freshly captured cookie, which you can grab with Fiddler
headers = {
'User-Agent': self.headers,
'Cookie': self.cookies
}
if use_prox:
proxies = {
"https": self.proxies.replace("http://",""),
"http": self.proxies.replace("http://", "")
}
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = "utf-8"
        # check whether we successfully got the response
if response.status_code != 200:
print('\nResponse Error!')
html = response.text
if save_html:
self.save_html(url, html)
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
return html
def total_page(self, html):
try:
page = html.xpath("//div[@class='pa']//div/text()")
page = str(page)
page = int(re.findall(r'/(.*?)页', str(page))[0])
if page > 100:
page = 100
return page
except Exception as e:
            print(f'Error while getting the total page: {e}')
            return 0
def parse_html(self, html):
post_list = html.xpath("//div[@class='c'][@id]")
info_list = []
for post in post_list:
poster = post.xpath(".//div/a[@class='nk']/text()")[0]
poster_url = post.xpath(".//div/a[@class='nk']/@href")[0]
post_date = post.xpath(".//div/span[@class='ct']/text()")[0]
post_like = post.xpath(".//div/a[@href]/text()")[-4]
post_repo = post.xpath(".//div/a[@href]/text()")[-3]
post_cmt = post.xpath(".//div/a[@href]/text()")[-2]
div = post.xpath(".//div")
if len(div) == 1:
post_txt = etree.tostring(post.xpath(".//div/span[@class='ctt']")[0], encoding="unicode")
post_txt = post_txt.replace('<span class="ctt">:', '')
post_txt = post_txt.replace(f'<span class="kt">{self.keyword}</span>', self.keyword)
post_txt = post_txt.replace('</span>\xa0', '')
# Here, as above, the data we get may contain nothing or only what the last user who repoed had written
# let's just tackle it later
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
elif len(div) == 2:
try:
temp_post = div[1].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e1:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e1)
if div[0].xpath(".//span[@class='cmt']/a[@href]/text()"):
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[0].xpath(".//span[@class='cmt']/text()")[2]
o_post_repo = div[0].xpath(".//span[@class='cmt']/text()")[3]
o_post_cmt = div[0].xpath(".//a[@class='cc']/text()")[0]
else:
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
# print("Warning: this user can be posting a pic, userID is %s.\r" % poster)
elif len(div) == 3:
try:
temp_post = div[2].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e3:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e3)
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
                # here we cannot just take the plain text, because people might have @-mentioned others or posted
                # hashtags which would be stripped if we only returned the bare text
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[1].xpath(".//span[@class='cmt']/text()")[0]
o_post_repo = div[1].xpath(".//span[@class='cmt']/text()")[1]
o_post_cmt = div[1].xpath(".//a[@class='cc']/text()")[0]
else:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
print("Error in implement")
info = {
'user_id': poster,
'user_url': poster_url,
'post_date': post_date,
'post_content': post_txt,
'post_like': post_like,
'post_repo': post_repo,
'post_comment': post_cmt,
'original_poster_id': o_poster,
'original_poster_url': o_poster_url,
'original_post_content': o_post_txt,
'original_post_like': o_post_like,
'original_post_repo': o_post_repo,
'original_post_comment': o_post_cmt
}
info_list.append(info)
info_list = pd.DataFrame(info_list)
return (info_list)
def post_list(self, get_ttp = True,use_prox=True):
info_list = | pd.DataFrame() | pandas.DataFrame |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
| pd.to_datetime("2020-01-06 11:06:00") | pandas.to_datetime |
#%%#############################################################################
# filterAndAggregate.py
# Copyright (c) 2017, <NAME> and <NAME>
# Affiliation: Department of Bacteriology
# University of Wisconsin-Madison, Madison, Wisconsin, USA
# URL: http://http://mcmahonlab.wisc.edu/
# All rights reserved.
################################################################################
# Count reads which map to each (genome, gene) pair and to each (clade, COG)
# pair.
################################################################################
#%%#############################################################################
### Import packages
################################################################################
import os
import pandas as pd
import subprocess
#%%#############################################################################
### Static folder structure
################################################################################
# Define fixed input and output files
concatFolder = '../../data/refGenomes/concat'
genomeFolder = '../../data/refGenomes/fna'
sampleFolder = '../../data/sequences'
mapFolder = '../../data/mapping'
bamFolder = '../../data/mapping/bamFiles'
coverageFolder = '../../data/mapping/coverage-pooled'
countFolder = '../../data/mapping/htseq'
cogTable = '../../data/orthoMCL/cogTable.csv'
taxonFile = '../../data/externalData/taxonomy.csv'
# Check that the new output directory exists and create if it doesn't
if not os.path.exists(countFolder):
print("Creating output directory\n")
os.makedirs(countFolder)
#%%#############################################################################
### Read in sample and genome lists. Create DF to store read countDict.
################################################################################
# Read in list of samples
sampleList = []
for sample in os.listdir(sampleFolder):
if sample.endswith('.fastq'):
sampleList.append(sample)
sampleList = [sample.replace('.fastq', '') for sample in sampleList]
# Read in list of genomes.
genomeList = []
for genome in os.listdir(genomeFolder):
if genome.endswith('.fna'):
genomeList.append(genome)
genomeList = [genome.replace('.fna', '') for genome in genomeList]
# Read in list of genomes.
concatList = []
for concat in os.listdir(concatFolder):
if concat.endswith('.fna'):
concatList.append(concat)
concatList = [concat.replace('.fna', '') for concat in concatList]
#%%#############################################################################
### Count the reads which align to each CDS
################################################################################
# Define parameters for HTSeq-Count script
minQual = 0
featureType = 'CDS'
idAttr = 'locus_tag'
overlapMode = 'intersection-strict'
for sample in sampleList:
for concat in concatList:
samFile = bamFolder+'/'+sample+'-'+concat+'.sam'
gffFile = concatFolder+'/'+concat+'.gff'
outFile = countFolder+'/'+sample+'-'+concat+'.CDS.out'
        subprocess.call('htseq-count -f sam -r pos -s no -a '+str(minQual)+
                        ' -t '+featureType+' -i '+idAttr+' -m '+overlapMode+
                        ' '+samFile+' '+gffFile+' > '+outFile, shell=True)
#%%#############################################################################
### Filtering. In this section, filter out all coding sequences which do not
### recruit at least 50 reads
################################################################################
# First, read in the read counts for each CDS
# Create empty dataframe to merge into
tempDF = pd.read_csv(countFolder+'/'+sampleList[0]+'-'+concatList[0]+'.CDS.out', sep='\t', index_col=0, names=[sampleList[0]])
readCountDF = pd.DataFrame(index=tempDF.index)
# And read in the counts
for sample in sampleList:
for concat in concatList:
tempDF = pd.read_csv(countFolder+'/'+sample+'-'+concat+'.CDS.out', sep='\t', index_col=0, names=[sample])
        # Drop the five summary rows that htseq-count appends
        # (__no_feature, __ambiguous, __too_low_aQual, __not_aligned, __alignment_not_unique)
        tempDF = tempDF[:-5]
# Merge with readCountDF
readCountDF = | pd.concat([readCountDF, tempDF], axis=1, join='outer') | pandas.concat |
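# Illustrative sketch of the ">= 50 reads" filter announced above (assumption:
# the threshold applies to the total count across samples); not part of the
# original script as shown:
# readCountDF = readCountDF[readCountDF.fillna(0).sum(axis=1) >= 50]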
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from data_scientia import config
from data_scientia.data import capacidad_hospitalaria
def get_down_hill(hosp_data):
"""Given the temporal data of a hospital, get the portion of the timeline
belonging to a down-hill, up to the point of the first state that is no
longer critical.
Parameters
-----------
hosp_data: pd.DataFrame
Historical capacidad hospitalaria of a single hospital.
Rows correspond to a daily record of its capacity.
Returns
--------
    down_hilll_data: pd.DataFrame
        Portion of the data that belongs to a down-hill segment of the
        historical data up to the first non-critical status.
hosp_data: pd.DataFrame
Remaining portion of the data, with the down_hilll_data removed.
"""
is_cooldown = (hosp_data['estatus_capacidad_uci'] != 'Crítica')
if is_cooldown.sum() > 0:
idx_cooldown = is_cooldown.argmax()
down_hilll_data = hosp_data.iloc[
:idx_cooldown + 1
]
hosp_data = hosp_data.iloc[
idx_cooldown + 1:]
else:
down_hilll_data = hosp_data.copy()
hosp_data = pd.DataFrame()
return down_hilll_data, hosp_data
def get_up_hill(hosp_data):
"""Given the temporal data of a hospital, get the portion of the timeline
belonging to an up-hill peak, up to the point of the first critical state
is found.
Parameters
-----------
hosp_data: pd.DataFrame
Historical capacidad hospitalaria of a single hospital.
Rows correspond to a daily record of its capacity.
Returns
--------
    up_hill_data: pd.DataFrame
        Portion of the data that belongs to an up-hill segment of the
        historical data up to the next critical status.
hosp_data: pd.DataFrame
Remaining portion of the data, with the up_hill_data removed.
"""
is_peak = hosp_data['estatus_capacidad_uci'] == 'Crítica'
if is_peak.sum() > 0:
idx_peak = is_peak.argmax()
up_hill_data = hosp_data.iloc[
:idx_peak + 1]
hosp_data = hosp_data.iloc[
idx_peak + 1:]
else:
up_hill_data = None
hosp_data = None
return up_hill_data, hosp_data
def get():
"""Get peaks dataset.
"""
# Get hosp. capacity data
data = capacidad_hospitalaria.get()
# Do not consider rows with UCI status.
data = data[~data['estatus_capacidad_uci'].isnull()]
# Find peaks and its statistics
peaks = []
for hospital, hospital_data in data.groupby('nombre_hospital'):
if config.VERBOSE:
print(hospital)
# Ensure data is ordered
hospital_data.sort_values('fecha', inplace=True)
# Loop until the end of the data
hospital_peaks = []
remaining_hospital_data = hospital_data.copy()
while(remaining_hospital_data.shape[0] > 2):
# Get next peak.
up_hilll_data, remaining_hospital_data = get_up_hill(
hosp_data=remaining_hospital_data)
# If no additional peaks stop the loop
if up_hilll_data is None:
break
# Get next cooldown
down_hilll_data, remaining_hospital_data = get_down_hill(
hosp_data=remaining_hospital_data)
# get peak stats (start, date of the peak, end of the down-hill)
last_peak = up_hilll_data['fecha'].min()
peak_date = up_hilll_data['fecha'].max()
date_cooldown = down_hilll_data['fecha'].max()
hospital_peaks.append({
'nombre_hospital': hospital,
'last_peak': last_peak,
'peak_date': peak_date,
'days_since_last_peak': (peak_date - last_peak).days,
'peak_cooldown': date_cooldown,
'peak_length': (date_cooldown - peak_date).days
})
# Hospital time has no peaks.
if len(hospital_peaks) == 0:
continue
# Keep track of all peaks
hospital_peaks = pd.DataFrame(hospital_peaks)
peaks.append(hospital_peaks)
peaks = | pd.concat(peaks) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
paesi_abitanti_eu = {"Austria": 8.917, "Belgium": 11.56, "Bulgaria": 6.927,
"Cyprus": 1.207, "Croatia": 4.047, "Denmark": 5.831,
"Estonia": 1.331, "Finland": 5.531, "France": 67.39,
"Germany": 83.24, "Greece": 10.27, "Ireland": 4.995,
"Italy": 59.55, "Latvia": 1.902, "Lithuania": 2.795,
"Luxembourg": 0.632275, "Malta": 0.525285, "Netherlands": 17.44,
"Poland": 37.95, "Portugal": 10.31, "Czechia": 10.7,
"Romania": 19.29, "Slovakia": 5.549, "Slovenia": 2.1,
"Spain": 47.35, "Sweden": 10.35, "Hungary": 9.75}
paesi_eu_ita = ["Austria", "Belgio", "Bulgaria", "Cipro", "Croazia", "Danimarca",
"Estonia", "Finlandia", "Francia", "Germania", "Grecia", "Irlanda",
"Italia", "Lettonia", "Lituania", "Lussemburgo", "Malta", "Olanda",
"Polonia", "Portogallo", "Repubblica Ceca", "Romania", "Slovacchia",
"Slovenia", "Spagna", "Svezia", "Ungheria"]
def stat_model(x, coeff_fit):
if len(coeff_fit) == 2:
y = coeff_fit[1] + coeff_fit[0]*x
elif len(coeff_fit) == 3:
y = coeff_fit[2] + coeff_fit[1]*x + coeff_fit[0]*x**2
else:
raise ValueError("Fit not supported")
return y
def fit_model(vacc_res_2021, dec_res_2021, degree=1):
    """Fit a polynomial of the given degree and return the prediction grid and R² score."""
coeff_fit = np.polyfit(vacc_res_2021, dec_res_2021, degree)
x_grid = np.arange(0, 100, 1)
y_grid = [stat_model(v, coeff_fit) for v in x_grid]
    # compute the R² score
y_pred = [stat_model(v, coeff_fit) for v in vacc_res_2021]
y_test = dec_res_2021
score = round(r2_score(y_test, y_pred), 2)
print("R\u00b2 score è pari a", score)
return x_grid, y_grid, score
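# Illustrative usage sketch (not part of the original script); the arrays below
# are hypothetical vaccination percentages and death rates:
# vacc_pct = [55.0, 60.2, 70.1]
# deaths_per_million = [120.0, 95.5, 60.3]
# x_grid, y_grid, score = fit_model(vacc_pct, deaths_per_million, degree=2)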
# Import vaccine and epidemiological data
def import_vaccines_data():
    """Fetch vaccination data from Our World in Data."""
url = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv"
df_vacc = pd.read_csv(url)
df_vacc = df_vacc.fillna(method="ffill")
return df_vacc
def get_vaccine_data(df_vacc, country):
""" Recupera dati vaccini per paese """
df_vacc_country = df_vacc[df_vacc["location"] == country].iloc[2:, :]
date = | pd.to_datetime(df_vacc_country["date"]) | pandas.to_datetime |
from __future__ import division
from utils.utils import *
from utils.vd_evaluator import VDEvaluator
from utils.parse_yolo_weights import parse_yolo_weights
from models.yolov3 import *
from models.shap_loss_1 import *
from dataset.dataset_vd import *
import os
import argparse
import yaml
import random
import torch
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import pandas as pd
import time
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='config/yolov3_vd.cfg',
help='config file. see readme')
parser.add_argument('--weights_path', type=str,
default=None, help='darknet weights file')
parser.add_argument('--n_cpu', type=int, default=0,
help='number of workers')
parser.add_argument('--checkpoint_interval', type=int,
default=1000, help='interval between saving checkpoints')
parser.add_argument('--eval_interval', type=int,
default=4000, help='interval between evaluations')
parser.add_argument('--checkpoint', type=str,
help='pytorch checkpoint file path')
parser.add_argument('--checkpoint_dir', type=str,
default='checkpoints',
help='directory where checkpoint files are saved')
parser.add_argument('--use_cuda', type=bool, default=True)
parser.add_argument('--debug', action='store_true', default=False,
help='debug mode where only one image is trained')
parser.add_argument(
'--tfboard', help='tensorboard path for logging', type=str, default=None)
parser.add_argument('--anno_file', type=str,
default='anno_data.json', help='annotation data json file name')
parser.add_argument('--shap_interval', type=int,
default=None, help='interval between updating shaploss')
return parser.parse_args()
def main():
"""
SHAP-regularized YOLOv3 trainer.
"""
args = parse_args()
print("Setting Arguments.. : ", args)
cuda = torch.cuda.is_available() and args.use_cuda
os.makedirs(args.checkpoint_dir, exist_ok=True)
# Parse config settings
with open(args.cfg, 'r') as f:
        cfg = yaml.safe_load(f)
print("successfully loaded config file: ", cfg)
momentum = cfg['TRAIN']['MOMENTUM']
decay = cfg['TRAIN']['DECAY']
burn_in = cfg['TRAIN']['BURN_IN']
iter_size = cfg['TRAIN']['MAXITER']
steps = eval(cfg['TRAIN']['STEPS'])
batch_size = cfg['TRAIN']['BATCHSIZE']
subdivision = cfg['TRAIN']['SUBDIVISION']
ignore_thre = cfg['TRAIN']['IGNORETHRE']
random_resize = cfg['AUGMENTATION']['RANDRESIZE']
base_lr = cfg['TRAIN']['LR'] / batch_size / subdivision
at_alpha = cfg['TRAIN']['ATTENTION_ALPHA']
at_beta = cfg['TRAIN']['ATTENTION_BETA']
print('effective_batch_size = batch_size * iter_size = %d * %d' %
(batch_size, subdivision))
# Learning rate setup
def burnin_schedule(i):
if i < burn_in:
            factor = pow(i / burn_in, 4)  # pow(x, y) = x**y
elif i < steps[0]:
factor = 1.0
elif i < steps[1]:
factor = 0.1
else:
factor = 0.01
return factor
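    # burnin_schedule returns a multiplicative factor for the base learning rate,
    # applied through the LambdaLR scheduler created below; e.g. with burn_in=1000,
    # iteration 500 yields pow(500 / 1000, 4) = 0.0625.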
# Initiate model
model = YOLOv3(cfg['MODEL'], ignore_thre=ignore_thre)
if args.weights_path:
print("loading darknet weights....", args.weights_path)
parse_yolo_weights(model, args.weights_path)
elif args.checkpoint:
print("loading pytorch ckpt...", args.checkpoint)
state = torch.load(args.checkpoint)
if 'model_state_dict' in state.keys():
model.load_state_dict(state['model_state_dict'])
else:
model.load_state_dict(state)
if cuda:
print("using cuda")
model = model.cuda()
if args.tfboard:
print("using tfboard")
from tensorboardX import SummaryWriter
tblogger = SummaryWriter(args.tfboard)
model.train()
imgsize = cfg['TRAIN']['IMGSIZE']
dataset = ListDataset(model_type=cfg['MODEL']['TYPE'],
data_dir=cfg['TRAIN']['TRAIN_DIR'],
json_file=args.anno_file,
img_size=imgsize,
augmentation=cfg['AUGMENTATION'])
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=args.n_cpu)
dataiterator = iter(dataloader)
evaluator = VDEvaluator(data_dir=cfg['TRAIN']['VAL_DIR'],
json_file=args.anno_file,
img_size=cfg['TEST']['IMGSIZE'],
confthre=cfg['TEST']['CONFTHRE'],
nmsthre=cfg['TEST']['NMSTHRE'])
dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# optimizer setup
# set weight decay only on conv.weight
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if 'conv.weight' in key:
params += [{'params':value, 'weight_decay':decay * batch_size * subdivision}]
else:
params += [{'params':value, 'weight_decay':0.0}]
optimizer = optim.SGD(params, lr=base_lr, momentum=momentum,
dampening=0, weight_decay=decay * batch_size * subdivision)
iter_state = 0
if args.checkpoint:
if 'optimizer_state_dict' in state.keys():
optimizer.load_state_dict(state['optimizer_state_dict'])
iter_state = state['iter'] + 1
scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
# start training loop
log_col = ['time(min)', 'iter','lr','xy', 'wh',
'conf', 'cls', 'shap', 'l2', 'imgsize',
'ap50', 'precision50', 'recall50', 'F_measure']
log = []
ap50 = np.nan
precision50 = np.nan
recall50 = np.nan
F_measure = np.nan
shap_loss = torch.tensor(float('nan'),dtype=torch.float32)
t_0 = time.time()
for iter_i in range(iter_state, iter_size + 1):
# VD evaluation
if iter_i % args.eval_interval == 0 and iter_i > 0:
ap50, precision50, recall50, F_measure = evaluator.evaluate(model)
model.train()
if args.tfboard:
tblogger.add_scalar('val/COCOAP50', ap50, iter_i)
print('[Iter {}/{}]:AP50:{}'.format(iter_i, iter_size,ap50))
# subdivision loop
optimizer.zero_grad()
for inner_iter_i in range(subdivision):
try:
imgs, targets, _, _ = next(dataiterator) # load a batch
except StopIteration:
dataiterator = iter(dataloader)
imgs, targets, _, _ = next(dataiterator) # load a batch
imgs = Variable(imgs.type(dtype))
targets = Variable(targets.type(dtype), requires_grad=False)
loss = model(imgs, targets)
loss_dict = model.loss_dict
# adding SHAP-based loss
if args.shap_interval is not None:
if inner_iter_i % args.shap_interval == 0:
shap_loss_ = shaploss(imgs, targets, model,
num_classes=cfg['MODEL']['N_CLASSES'],
confthre=cfg['TEST']['CONFTHRE'],
nmsthre=cfg['TEST']['NMSTHRE'],
n_samples=cfg['TRAIN']['N_SAMPLES'],
alpha=at_alpha, beta=at_beta)
                    # only keep the new SHAP loss when it is non-zero and not NaN
                    if shap_loss_ != 0 and not torch.isnan(shap_loss_):
shap_loss = shap_loss_
model.train()
loss += shap_loss
loss.backward()
optimizer.step()
scheduler.step()
if iter_i % 10 == 0:
# logging
current_lr = scheduler.get_lr()[0] * batch_size * subdivision
t = (time.time() - t_0)//60
print('[Time %d] [Iter %d/%d] [lr %f] '
'[Losses: xy %f, wh %f, conf %f, cls %f, att %f, total %f, imgsize %d, ap %f, precision %f, recall %f, F %f]'
% (t, iter_i, iter_size, current_lr,
loss_dict['xy'], loss_dict['wh'],
loss_dict['conf'], loss_dict['cls'], shap_loss,
loss_dict['l2'], imgsize, ap50, precision50, recall50, F_measure),
flush=True)
log.append([t, iter_i, current_lr,
np.atleast_1d(loss_dict['xy'].to('cpu').detach().numpy().copy())[0],
np.atleast_1d(loss_dict['wh'].to('cpu').detach().numpy().copy())[0],
np.atleast_1d(loss_dict['conf'].to('cpu').detach().numpy().copy())[0],
np.atleast_1d(loss_dict['cls'].to('cpu').detach().numpy().copy())[0],
np.atleast_1d(shap_loss.to('cpu').detach().numpy().copy())[0],
np.atleast_1d(loss_dict['l2'].to('cpu').detach().numpy().copy())[0],
imgsize, ap50, precision50, recall50, F_measure])
ap50 = np.nan
precision50 = np.nan
recall50 = np.nan
F_measure = np.nan
if args.tfboard:
tblogger.add_scalar('train/total_loss', model.loss_dict['l2'], iter_i)
# random resizing
if random_resize:
imgsize = (random.randint(0, 9) % 10 + 10) * 32
dataset.img_shape = (imgsize, imgsize)
dataset.img_size = imgsize
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=args.n_cpu)
dataiterator = iter(dataloader)
# save checkpoint
#if iter_i > 0 and (iter_i % args.checkpoint_interval == 0):
if (0<iter_i<=1000 and (iter_i % 100 == 0))or(1000<iter_i and (iter_i % 500 == 0)):
torch.save({'iter': iter_i,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
},
os.path.join(args.checkpoint_dir, "snapshot"+str(iter_i)+".ckpt"))
df_log = | pd.DataFrame(log, columns=log_col) | pandas.DataFrame |
from Bio import SeqIO
import os
import pandas
import json
import icebreaker
# LOAD ALL "SHARED" FOLDERS DATA ON YOUR COMPUTER
ice = icebreaker.IceClient("../api_tokens/admin.yaml")
folders = ice.get_collection_folders("SHARED")
# folders = folders[:4] # test with a small part
local_data_folder = "local_data"
os.makedirs(local_data_folder, exist_ok=True)
for folder in folders:
print("Processing folder", folder["folderName"])
local_folder_path = os.path.join(local_data_folder, folder["folderName"])
if not os.path.exists(local_folder_path):
os.mkdir(local_folder_path)
parts_in_folder = ice.get_folder_entries(folder_id=folder["id"])
genbanks_path = os.path.join(local_folder_path, "records")
if not os.path.exists(genbanks_path):
os.mkdir(genbanks_path)
parts_data_path = os.path.join(local_folder_path, "data")
if not os.path.exists(parts_data_path):
os.mkdir(parts_data_path)
parts_infos_list = []
for part in parts_in_folder:
print("... entry", part["name"])
part_infos = ice.get_part_infos(part["id"])
parts_infos_list.append(part_infos)
json_target = os.path.join(
parts_data_path, "%s.json" % part_infos["id"]
)
with open(json_target, "w") as f:
json.dump(part_infos, f)
genbank_target = os.path.join(
genbanks_path, "%s.gb" % part_infos["id"]
)
genbank = ice.get_sequence(part["id"])
with open(genbank_target, "w") as f:
f.write(genbank)
df = | pandas.DataFrame.from_records(parts_infos_list) | pandas.DataFrame.from_records |
import gc
import sys
import logging
import yaml
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from utils import timer, load_data, reduce_mem_usage
from encoders import GaussianTargetEncoder
# define groupings and corresponding priors
groups_and_priors = {
# single encodings
("hour",): None,
("weekday",): None,
("month",): None,
("building_id",): None,
("primary_use",): None,
("site_id",): None,
# ("meter",): None,
# # second-order interactions
# ("meter", "hour"): ["gte_meter", "gte_hour"],
# ("meter", "weekday"): ["gte_meter", "gte_weekday"],
# ("meter", "month"): ["gte_meter", "gte_month"],
# ("meter", "building_id"): ["gte_meter", "gte_building_id"],
# ("meter", "primary_use"): ["gte_meter", "gte_primary_use"],
# ("meter", "site_id"): ["gte_meter", "gte_site_id"],
# # higher-order interactions with building_id
# ("meter", "building_id", "hour"): ["gte_meter_building_id", "gte_meter_hour"],
# ("meter", "building_id", "weekday"): ["gte_meter_building_id", "gte_meter_weekday"],
# ("meter", "building_id", "month"): ["gte_meter_building_id", "gte_meter_month"],
}
def process_timestamp(df):
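    """Convert the timestamp column to whole hours elapsed since 2016-01-01."""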
df.timestamp = pd.to_datetime(df.timestamp)
df.timestamp = (
df.timestamp - pd.to_datetime("2016-01-01")
).dt.total_seconds() // 3600
def process_weather(
df, dataset, fix_timestamps=True, interpolate_na=True, add_na_indicators=True
):
if fix_timestamps:
site_GMT_offsets = [-5, 0, -7, -5, -8, 0, -5, -5, -5, -6, -7, -5, 0, -6, -5, -5]
GMT_offset_map = {site: offset for site, offset in enumerate(site_GMT_offsets)}
df.timestamp = df.timestamp + df.site_id.map(GMT_offset_map)
if interpolate_na:
site_dfs = []
for site_id in df.site_id.unique():
# Make sure that we include all possible hours so that we can interpolate evenly
if dataset == "train":
site_df = (
df[df.site_id == site_id]
.set_index("timestamp")
.reindex(range(8784))
)
elif dataset == "test":
site_df = (
df[df.site_id == site_id]
.set_index("timestamp")
.reindex(range(8784, 26304))
)
else:
raise ValueError(f"dataset={dataset} not recognized")
site_df.site_id = site_id
for col in [c for c in site_df.columns if c != "site_id"]:
if add_na_indicators:
site_df[f"had_{col}"] = ~site_df[col].isna()
site_df[col] = site_df[col].interpolate(
limit_direction="both",
method="spline",
order=3,
)
# Some sites are completely missing some columns, so use this fallback
site_df[col] = site_df[col].fillna(df[col].median())
site_dfs.append(site_df)
df = pd.concat(
site_dfs
).reset_index() # make timestamp back into a regular column
if add_na_indicators:
for col in df.columns:
if df[col].isna().any():
df[f"had_{col}"] = ~df[col].isna()
return df.fillna(-1) # .set_index(["site_id", "timestamp"])
def add_lag_feature(df, window=3, group_cols="site_id", lag_cols=["air_temperature"]):
rolled = df.groupby(group_cols)[lag_cols].rolling(
window=window, min_periods=0, center=True
)
lag_mean = rolled.mean().reset_index().astype(np.float16)
lag_max = rolled.quantile(0.95).reset_index().astype(np.float16)
lag_min = rolled.quantile(0.05).reset_index().astype(np.float16)
lag_std = rolled.std().reset_index().astype(np.float16)
for col in lag_cols:
df[f"{col}_mean_lag{window}"] = lag_mean[col]
df[f"{col}_max_lag{window}"] = lag_max[col]
df[f"{col}_min_lag{window}"] = lag_min[col]
df[f"{col}_std_lag{window}"] = lag_std[col]
def add_features(df):
# time features
df["hour"] = df.ts.dt.hour
df["weekday"] = df.ts.dt.weekday
df["month"] = df.ts.dt.month
df["year"] = df.ts.dt.year
# time interactions
df["weekday_hour"] = df.weekday.astype(str) + "-" + df.hour.astype(str)
# apply cyclic encoding of periodic features
df["hour_x"] = np.cos(2 * np.pi * df.timestamp / 24)
df["hour_y"] = np.sin(2 * np.pi * df.timestamp / 24)
df["month_x"] = np.cos(2 * np.pi * df.timestamp / (30.4 * 24))
df["month_y"] = np.sin(2 * np.pi * df.timestamp / (30.4 * 24))
df["weekday_x"] = np.cos(2 * np.pi * df.timestamp / (7 * 24))
df["weekday_y"] = np.sin(2 * np.pi * df.timestamp / (7 * 24))
# meta data features
df["year_built"] = df["year_built"] - 1900
# bulding_id interactions
# bm_ = df.building_id.astype(str) + "-" + df.meter.astype(str) + "-"
bm_ = df.building_id.astype(str) + "-"
df["building_weekday_hour"] = bm_ + df.weekday_hour
df["building_weekday"] = bm_ + df.weekday.astype(str)
df["building_month"] = bm_ + df.month.astype(str)
df["building_hour"] = bm_ + df.hour.astype(str)
# df["building_meter"] = bm_
# get holidays
dates_range = pd.date_range(start="2015-12-31", end="2019-01-01")
us_holidays = calendar().holidays(start=dates_range.min(), end=dates_range.max())
df["is_holiday"] = (df.ts.dt.date.astype("datetime64").isin(us_holidays)).astype(
np.int8
)
if __name__ == "__main__":
# load config file from CLI
with open(str(sys.argv[1]), "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
algorithm = config["algorithm"]
discord_file = config["discord_file"]
data_location = config["data_location"]
discord_location = config["discord_location"]
output_location = config["output_location"]
# logging file
logging.basicConfig(
filename=algorithm + ".log",
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s",
)
logging.info(f"Experiment: {algorithm}")
with timer("Loading data"):
logging.info("Loading data")
train, test = load_data("input", data_location=data_location)
building_meta = load_data("meta", data_location=data_location)
train_weather, test_weather = load_data("weather", data_location=data_location)
with timer("Process timestamp"):
logging.info("Process timestamp")
train["ts"] = pd.to_datetime(train.timestamp)
test["ts"] = | pd.to_datetime(test.timestamp) | pandas.to_datetime |
"""module for web map with films locations in certain years"""
import argparse
import functools
from typing import Dict, List, Tuple, Union
import folium
import pandas as pd
def create_map(
path: str,
location: Tuple[float, float],
year: int,
fast_procesing: bool,
opened: bool,
) -> None:
"""creates web map of films from database
Args:
path (str): path to file with films
location (Tuple[float, float]): latitude and longtitude of current location
year (int): year of films to search for in closest layer
fast_procesing (bool): using preprocesed dataset
opened (bool): open web map after generating it
"""
# create map/html
map = folium.Map(location=location, zoom_start=5, control_scale=True)
if fast_procesing:
films = get_films_info_from_csv(path)
else:
films = get_films_info(path)
local_films = find_films_in_location(films)
closest_films = find_closest_locations(films, location, str(year))
local_films_layer = create_layer(local_films, "Films in location")
closest_films_in_year_layer = create_layer(
closest_films, f"Closest films in {year}"
)
# create marker for user location
current_location = folium.CircleMarker(
location=location,
radius=15,
popup="Я тут)",
fill_color="green",
color="green",
fill_opacity=0.5,
)
map.add_child(current_location)
map.add_child(local_films_layer)
map.add_child(closest_films_in_year_layer)
map.add_child(folium.LayerControl())
map.save("Film_map.html")
if opened:
open_web_map("Film_map.html")
def create_layer(films: pd.DataFrame, name: str) -> folium.FeatureGroup:
"""creates layer for web map
Args:
films (pd.DataFrame): films for layer
name (str): name of the layer in the web map
Returns:
folium.FeatureGroup: web map layer with films
"""
# dictionary for assigning to marker popup multiple films
films_locations: Dict[Tuple[float, float], List[Tuple[str, str]]] = dict()
for i in range(len(films)):
if films.iloc[i]["Coordinates"] in films_locations.keys():
films_locations[films.iloc[i]["Coordinates"]].append(
(films.iloc[i]["Name"], films.iloc[i]["Year"])
)
else:
films_locations[films.iloc[i]["Coordinates"]] = [
(films.iloc[i]["Name"], films.iloc[i]["Year"])
]
# create layer for map
films_layer = folium.FeatureGroup(name=name)
for film_coordinates in films_locations.keys():
iframe = folium.IFrame(
html=create_html_popup(films_locations[film_coordinates]),
width=250,
height=100,
)
films_layer.add_child(
folium.Marker(
location=film_coordinates,
popup=folium.Popup(iframe),
icon=folium.Icon(),
)
)
return films_layer
def create_html_popup(films: List[Tuple[str, str]]) -> str:
"""creates html_template for popup window
Args:
films (List[Tuple[str, str]]): films for this popup
Returns:
str: html string to be showed in popup
"""
html_template = "Films:"
for film in films:
html_template += f"""<br>
<a href="https://www.google.com/search?q=%22{film[0].strip('"')}%22"
target="_blank">{film[0], film[1]}</a><br>
"""
return html_template
def get_films_info(path: str) -> pd.DataFrame:
"""reads file, converts it by lines in dataframe
Args:
path (str): path to file with films
Returns:
pd.DataFrame: dataframe consisting of films' names,\
years and locations
"""
try:
with open(path, "r", encoding="utf-8", errors="ignore") as data:
for _ in range(14):
data.readline()
films_data = data.readlines()
films = []
for film in films_data:
if film[-2] == ")":
name_and_year, place = film.split("\t")[0], film.split("\t")[-2]
else:
name_and_year, place = film.split("\t")[0], film.split("\t")[-1][:-1]
year_start = name_and_year.find("(")
films.append([
name_and_year[: year_start - 1],
name_and_year[year_start + 1: year_start + 5],
place,
])
films_df = pd.DataFrame(films[:-1], columns=["Name", "Year", "Location"])
except FileNotFoundError:
print("There is no such file")
films_df = | pd.DataFrame(columns=["Name", "Year", "Location"]) | pandas.DataFrame |
import pandas as pd
from typing import List, Tuple
import pickle
from functools import reduce
from data_analysis.analysis import find_relevant_currency_hops
leagues = ["hc_betrayal", "betrayal", "standard", "hardcore"]
def read_merged_pickle(path):
with open(path, "rb") as f:
data = pickle.load(f)
return data
# Read raw merged pickle file per league
files = ["data_analysis/raw/{}/merge.pickle".format(x) for x in leagues]
raw_data_list = [read_merged_pickle(x) for x in files]
# Map each merged pickle file to the hop data
hop_data = [find_relevant_currency_hops(x, 0) for x in raw_data_list]
hop_groups = [x["groups"] for x in hop_data]
def map_groups_to_dataframe(groups: List[Tuple[str, int]], league: str) -> pd.DataFrame:
currency_edge = [x[0] for x in groups]
snapshot_popularity = [x[1] for x in groups]
return pd.DataFrame(
{"edge": currency_edge, "{}".format(league): snapshot_popularity}
)
edge_popularity_dfs = [
map_groups_to_dataframe(x, leagues[idx]) for idx, x in enumerate(hop_groups)
]
final_df = reduce(
lambda x, y: pd.merge(x, y, on="edge", how="outer"),
edge_popularity_dfs,
| pd.DataFrame({"edge": []}) | pandas.DataFrame |
import pandas as pd
import time
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from typedb.client import SessionType, TransactionType, TypeDBOptions
def prep_entity_insert_queries(
df,
isa_type,
mappings,
dict_attr_valuetype
):
'''
@usage: subroutine to convert a typedb entity subtype and list of string
containing column-to-schematype mappings into a list of TypeQL insert queries
everything is vectorized using pandas.Series.str.cat for speed
@param df: data table
@param isa_type: typedb_type, string
@param mappings: list of string: "has {rel_attr_type} <{column_selected}>"
@param valuetype: attribute valuetype
@return list of TypeQL insert queries: ["insert $x isa cardealer, has name Lisette;", "insert $x isa ... "]
'''
list_attr = [mapping.split(" <")[0].split("has ")[1].strip() for mapping in mappings]
pattern_missing = "|".join([f" has {attr} ?,|has {attr} '',| has {attr} 'nan',| has {attr} nan," for attr in list_attr])
list_series_stump = []
# for each input attribute, prepare series of string like ["$x has age 24", "$x has age 64", .. ]
for mapping in mappings:
mapping = str(mapping)
column_selected = mapping.split("<")[1].rstrip(">")
data = pd.Series(data=df[column_selected], dtype=str)
attr = mapping.split(" <")[0].split("has ")[1].strip()
if dict_attr_valuetype[attr]=="STRING":
stump = " has " + attr + " '"
else:
stump =" has " + attr + " "
list_stump = [stump]*df.shape[0]
series_stump = pd.Series(data=list_stump, dtype = str)
# # concatenate with values and the second quotation mark
series_stump = series_stump.str.cat(others = [data])
if dict_attr_valuetype[attr] == "STRING":
series_stump = series_stump.str.cat(others = [pd.Series(data=["'"]*df.shape[0], dtype=str)])
list_series_stump.append(series_stump)
# Now concatenate the lists of query stumps onto the initial "insert $x isa plumber; "
series_queries_init = pd.Series(data=[f"insert $x isa {isa_type}" for i in range(df.shape[0])], dtype=str)
# concatenate all the above lists to it element-wise; add a final semicolon to complete each query
series_queries_out = series_queries_init.str.cat(others=list_series_stump, sep=",")
# remove clauses with missing value attribute
series_queries_out = series_queries_out.str.replace(pat=pattern_missing, repl="", case=False, regex=True)
series_queries_out = series_queries_out.str.cat(others=pd.Series(data=["; "]*df.shape[0], dtype=str))
return list(series_queries_out)
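# Illustrative usage sketch (not part of the original module); the dataframe,
# attribute names and mappings below are hypothetical:
# df = pd.DataFrame({"person_name": ["Lisette"], "person_age": [42]})
# queries = prep_entity_insert_queries(
#     df, isa_type="person",
#     mappings=["has full-name <person_name>", "has age <person_age>"],
#     dict_attr_valuetype={"full-name": "STRING", "age": "LONG"})
# # -> ["insert $x isa person, has full-name 'Lisette', has age 42; "]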
def prep_relation_insert_queries(
df,
isa_type,
mappings,
dict_attr_valuetype,
):
'''
@usage: subroutine to convert a typedb relation subtype and list of string
containing column-to-schematype mappings into a list of TypeQL insert queries
everything is vectorized using pandas.Series.str.cat for speed
@param df: data table
@param isa_type: typedb_type, string
@param mappings:
"has {0} <{1}>".format(select_sub,column_selected) for RELATION attributes
"{0} isa {1}, has {2} <{3}> ... {4} : {5}".format(rp_var, rp_type, rp_attr_type, column_selected, role, rp_var) for ROLEPLAYER attributes
@param dict_attr_valuetype: attribute valuetype
@return list of TypeQL insert queries:
["match $company isa company, has name 'Telecom'; $customer isa person, has phone-number '+00 091 xxx'; insert (provider: $company, customer: $customer) isa contract;', ... ]
'''
list_attr = [mapping.split(" <")[0].split("has ")[1].strip() for mapping in mappings]
pattern_missing = "|".join([f" ?has {attr} ?,| ?has {attr} '',| ?has {attr} 'nan',| ?has {attr} nan," for attr in list_attr])
series_stump_match_init = pd.Series(data=["match "] * df.shape[0], dtype=str)
list_series_stump_rp_isa_has = []
series_stump_insert_init = pd.Series(["insert ("] * df.shape[0], dtype=str)
list_series_stump_role_rp = []
series_stump_insert_rel_isa = pd.Series([f") isa {isa_type}" for i in range(df.shape[0])], dtype=str)
list_series_stump_rel_has = []
for mapping in mappings:
if " ... " in mapping:
# roleplayer attribute
# "${0} isa {1}, has {2} <{3}>, has {} <{}>, has {} <{}> ... {4} : {5}".format(rp_var, rp_type, rp_attr_type, column_selected, role, rp_var)
# prepare the isa / has part of the query
series_stump_rp_isa = pd.Series(data=[mapping.split("; ")[0]]*df.shape[0], dtype=str) if "isa" in mapping else None
list_series_stump_rp_has = []
list_stump_rp_has = mapping.split(" ... ")[0].split(";")[1:] if type(series_stump_rp_isa) is pd.Series else mapping.split(" ... ")[0].split(";")
if not list_stump_rp_has:
raise ValueError(f"role player {mapping.split(' ')[0]} must have unique attributes but has none")
for stump_rp_has in list_stump_rp_has:
column_selected = stump_rp_has.split("<")[1].rstrip(">")
attr = stump_rp_has.split(" <")[0].split("has ")[1].strip()
if dict_attr_valuetype[attr]=="STRING":
stump = f"{mapping.split(' ')[0]} has " + attr + " '"
else:
stump = f"{mapping.split(' ')[0]} has " + attr + " "
series_stump_rp_has = | pd.Series(data=[stump]*df.shape[0], dtype=str) | pandas.Series |
import abc
import time, random
import pandas as pd
import os
import numpy as np
import benchutils as utils
import knowledgebases
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn import preprocessing
class FeatureSelectorFactory():
"""Singleton class.
Python code encapsulates it in a way that is not shown in Sphinx, so have a look at the descriptions in the source code.
Creates feature selector object based on a given name.
New feature selection approaches must be registered here.
Names for feature selectors must follow to a particular scheme, with keywords separated by _:
- first keyword is the actual selector name
- if needed, second keyword is the knowledge base
- if needed, third keyword is the (traditional) approach to be combined
Examples:
- Traditional Approaches have only one keyword, e.g. InfoGain or ANOVA
- LassoPenalty_KEGG provides KEGG information to the LassoPenalty feature selection approach
- Weighted_KEGG_InfoGain --> Factory creates an instance of KBweightedSelector which uses KEGG as knowledge base and InfoGain as traditional selector.
While the focus here lies on the combination of traditional approaches with prior biological knowledge, it is theoretically possible to use ANY selector object for combination that inherits from :class:`FeatureSelector`.
:param config: configuration parameters for UMLS web service as specified in config file.
:type config: dict
"""
class __FeatureSelectorFactory():
def createFeatureSelector(self, name):
"""Create selector from a given name.
Separates creation process into (traditional) approaches (only one keyword), approaches requiring a knowledge base, and approaches requiring both a knowledge base and another selector, e.g. a traditional one.
:param name: selector name following the naming conventions: first keyword is the actual selector name, second keyword is the knowledge base, third keyword another selector to combine. Keywords must be separated by "_". Example: Weighted_KEGG_InfoGain
:type name: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
parts = name.split("_")
if len(parts) == 1:
return self.createTraditionalSelector(name)
elif len(parts) == 2:
return self.createIntegrativeSelector(parts[0], parts[1])
elif len(parts) == 3:
return self.createCombinedSelector(parts[0], parts[1], parts[2])
utils.logError("ERROR: The provided selector name does not correspond to the expected format. "
"A selector name should consist of one or more keywords separated by _. "
"The first keyword is the actual approach (e.g. weighted, or a traditional approach), "
"the second keyword corresponds to a knowledge base to use (e.g. KEGG),"
"the third keyword corresponds to a traditional selector to use (e.g. when using a modifying or combining approach")
exit()
def createTraditionalSelector(self, selectorName):
"""Creates a (traditional) selector (without a knowledge base) from a given name.
Register new implementations of a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
if selectorName == "Random":
return RandomSelector()
if selectorName == "VB-FS":
return VarianceSelector()
if selectorName == "Variance":
return Variance2Selector()
if selectorName == "ANOVA":
return AnovaSelector()
if selectorName == "mRMR":
return MRMRSelector()
if selectorName == "SVMpRFE":
return SVMRFESelector()
# RUN WEKA FEATURE SELECTION AS SELECTED
if selectorName == "InfoGain":
return InfoGainSelector()
if selectorName == "ReliefF":
return ReliefFSelector()
#if "-RFE" in selectorName or "-SFS" in selectorName: -- SFS is currently disabled because sometimes the coef_ param is missing and error is thrown
if "-RFE" in selectorName:
return WrapperSelector(selectorName)
if selectorName == "Lasso":
return LassoSelector()
if selectorName == "RandomForest":
return RandomForestSelector()
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createIntegrativeSelector(self, selectorName, kb):
"""Creates a feature selector using a knowledge base from the given selector and knowledge base names.
            Register new implementations of a prior knowledge selector that does not require a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "NetworkActivity":
featuremapper = PathwayActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "CorgsNetworkActivity":
featuremapper = CORGSActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "LassoPenalty":
return LassoPenalty(knowledgebase)
if selectorName == "KBonly":
return KbSelector(knowledgebase)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createCombinedSelector(self, selectorName, trad, kb):
"""Creates a feature selector that combines a knowledge base and another feature selector based on the given names.
Register new implementations of a prior knowledge selector that requires another selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param trad: name of the (traditional) feature selector.
:type trad: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
tradSelector = self.createTraditionalSelector(trad)
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "Postfilter":
return PostFilterSelector(knowledgebase, tradSelector)
if selectorName == "Prefilter":
return PreFilterSelector(knowledgebase, tradSelector)
if selectorName == "Extension":
return ExtensionSelector(knowledgebase, tradSelector)
if selectorName == "Weighted":
return KBweightedSelector(knowledgebase, tradSelector)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
instance = None
def __init__(self):
if not FeatureSelectorFactory.instance:
FeatureSelectorFactory.instance = FeatureSelectorFactory.__FeatureSelectorFactory()
def __getattr__(self, name):
return getattr(self.instance, name)
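# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical:
# factory = FeatureSelectorFactory()
# selector = factory.createFeatureSelector("Weighted_KEGG_InfoGain")
# selector.setParams("/path/to/dataset.csv", "/path/to/output/", "/path/to/logs/")
# ranking_file = selector.selectFeatures()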
class FeatureSelector:
"""Abstract super class for feature selection functionality.
Every feature selection class has to inherit from this class and implement its :meth:`FeatureSelector.selectFeatures` method and - if necessary - its :meth:`FeatureSelector.setParams` method.
Once created, feature selection can be triggered by first setting parameters (input, output, etc) as needed with :meth:`FeatureSelector.setParams`.
The actual feature selection is triggered by invoking :meth:`FeatureSelector.selectFeatures`.
:param input: absolute path to input dataset.
:type input: str
:param output: absolute path to output directory (where the ranking will be stored).
:type output: str
:param dataset: the dataset for which to select features. Will be loaded dynamically based on self.input at first usage.
:type dataset: :class:`pandas.DataFrame`
:param dataConfig: config parameters for input data set.
:type dataConfig: dict
:param name: selector name
:type name: str
"""
def __init__(self, name):
self.input = None
self.output = None
self.dataset = None
self.loggingDir = None
self.dataConfig = utils.getConfig("Dataset")
self.setTimeLogs(utils.createTimeLog())
self.enableLogFlush()
self.name = name
super().__init__()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Invoke feature selection functionality in this method when implementing a new selector
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getTimeLogs(self):
"""Gets all logs for this selector.
:return: dataframe of logged events containing start/end time, duration, and a short description.
:rtype: :class:`pandas.DataFrame`
"""
return self.timeLogs
def setTimeLogs(self, newTimeLogs):
"""Overwrites the current logs with new ones.
:param newTimeLogs: new dataframe of logged events containing start/end time, duration, and a short description.
:type newTimeLogs: :class:`pandas.DataFrame`
"""
self.timeLogs = newTimeLogs
def disableLogFlush(self):
"""Disables log flushing (i.e., writing the log to a separate file) of the selector at the end of feature selection.
This is needed when a :class:`CombiningSelector` uses a second selector and wants to avoid that its log messages are written, potentially overwriting logs from another selector of the same name.
"""
self.enableLogFlush = False
def enableLogFlush(self):
"""Enables log flushing, i.e. writing the logs to a separate file at the end of feature selection.
"""
self.enableLogFlush = True
def getName(self):
"""Gets the selector's name.
:return: selector name.
:rtype: str
"""
return self.name
def getData(self):
"""Gets the labeled dataset from which to select features.
:return: dataframe containing the dataset with class labels.
:rtype: :class:`pandas.DataFrame`
"""
if self.dataset is None:
self.dataset = pd.read_csv(self.input, index_col=0)
return self.dataset
def getUnlabeledData(self):
"""Gets the dataset without labels.
:return: dataframe containing the dataset without class labels.
:rtype: :class:`pandas.DataFrame`
"""
dataset = self.getData()
return dataset.loc[:, dataset.columns != "classLabel"]
def getFeatures(self):
"""Gets features from the dataset.
:return: list of features.
:rtype: list of str
"""
return self.getData().columns[1:]
def getUniqueLabels(self):
"""Gets the unique class labels available in the dataset.
:return: list of distinct class labels.
:rtype: list of str
"""
return list(set(self.getLabels()))
def getLabels(self):
"""Gets the labels in the data set.
:return: all labels from the dataset.
:rtype: list of str
"""
return list(self.getData()["classLabel"])
def setParams(self, inputPath, outputDir, loggingDir):
"""Sets parameters for the feature selection run: path to the input datast and path to the output directory.
:param inputPath: absolute path to the input file containing the dataset for analysis.
:type inputPath: str
:param outputDir: absolute path to the output directory (where to store the ranking)
:type outputDir: str
:param loggingDir: absolute path to the logging directory (where to store log files)
:type loggingDir: str
"""
self.input = inputPath
self.output = outputDir
self.loggingDir = loggingDir
def writeRankingToFile(self, ranking, outputFile, index = False):
"""Writes a given ranking to a specified file.
:param ranking: dataframe with the ranking.
:type ranking: :class:`pandas.DataFrame`
:param outputFile: absolute path of the file where ranking will be stored.
:type outputFile: str
:param index: whether to write the dataframe's index or not.
:type index: bool, default False
"""
if not ranking.empty:
ranking.to_csv(outputFile, index = index, sep = "\t")
else:
#make sure to write at least the header if the dataframe is empty
with open(outputFile, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
class PythonSelector(FeatureSelector):
"""Abstract.
Inherit from this class when implementing a feature selector using any of scikit-learn's functionality.
As functionality invocation, input preprocessing and output postprocessing are typically very similar/the same for such implementations, this class already encapsulates it.
Instead of implementing :meth:`PythonSelector.selectFeatures`, implement :meth:`PythonSelector.runSelector`.
"""
def __init__(self, name):
super().__init__(name)
@abc.abstractmethod
def runSelector(self, data, labels):
"""Abstract - implement this method when inheriting from this class.
Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
pass
def selectFeatures(self):
"""Executes the feature selection procedure.
Prepares the input data set to match scikit-learn's expected formats and postprocesses the output to create a ranking.
:return: absolute path to the output ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
data, labels = self.prepareInput()
selector = self.runSelector(data, labels)
self.prepareOutput(outputFile, data, selector)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
def prepareInput(self):
"""Prepares the input data set before running any of scikit-learn's selectors.
Removes the labels from the input data set and encodes the labels in numbers.
:return: dataset (without labels) and labels encoded in numbers.
:rtype: :class:`pandas.DataFrame` and list of int
"""
start = time.time()
labels = self.getLabels()
data = self.getUnlabeledData()
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Input Preparation")
return data, numeric_labels
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.scores_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
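# Illustrative sketch (not part of the original module) of a minimal
# PythonSelector subclass; the class name is hypothetical:
# class ExampleKBestSelector(PythonSelector):
#     def __init__(self):
#         super().__init__("ExampleKBest")
#     def runSelector(self, data, labels):
#         # score all features with ANOVA F-values; prepareOutput reads .scores_
#         selector = SelectKBest(f_classif, k="all")
#         selector.fit(data, labels)
#         return selector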
class RSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param rConfig: config parameters to execute R code.
:type rConfig: dict
"""
def __init__(self, name):
self.rConfig = utils.getConfig("R")
self.scriptName = "FS_" + name + ".R"
super().__init__(name)
@abc.abstractmethod
def createParams(self, filename):
"""Abstract.
Implement this method to set the parameters your R script requires.
:param filename: absolute path of the output file.
:type filename: str
:return: list of parameters to use for R code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external R code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
params = self.createParams(outputFile)
utils.runRCommand(self.rConfig, self.scriptName , params)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return filename
class JavaSelector(FeatureSelector):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param javaConfig: config parameters to execute java code.
:type javaConfig: dict
"""
def __init__(self, name):
self.javaConfig = utils.getConfig("Java")
super().__init__(name)
@abc.abstractmethod
def createParams(self):
"""Abstract.
Implement this method to set the parameters your java code requires.
:return: list of parameters to use for java code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external java code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.name + ".csv"
params = self.createParams()
utils.runJavaCommand(self.javaConfig, "/WEKA_FeatureSelector.jar", params)
output_filepath = self.output + filename
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return output_filepath
############################### PRIOR KNOWLEDGE SELECTORS ###############################
class PriorKnowledgeSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Super class for all prior knowledge approaches.
If you want to implement an own prior knowledge approach that uses a knowledge base (but not a second selector and no network approaches), inherit from this class.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param alternativeSearchTerms: list of alternative search terms to use for querying the knowledge base.
:type alternativeSearchTerms: list of str
"""
def __init__(self, name, knowledgebase):
self.knowledgebase = knowledgebase
super().__init__(name)
self.alternativeSearchTerms = self.collectAlternativeSearchTerms()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def collectAlternativeSearchTerms(self):
"""Gets all alternative search terms that were specified in the config file and put them into a list.
:return: list of alternative search terms to use for querying the knowledge base.
:rtype: list of str
"""
alternativeTerms = self.dataConfig["alternativeSearchTerms"].split(" ")
searchTerms = []
for term in alternativeTerms:
searchTerms.append(term.replace("_", " "))
return searchTerms
def getSearchTerms(self):
"""Gets all search terms to use for querying a knowledge base.
Search terms that will be used are a) the class labels in the dataset, and b) the alternative search terms that were specified in the config file.
:return: list of search terms to use for querying the knowledge base.
:rtype: list of str
"""
searchTerms = list(self.getUniqueLabels())
searchTerms.extend(self.alternativeSearchTerms)
return searchTerms
def getName(self):
"""Returns the full name (including applied knowledge base) of this selector.
:return: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
#selector class for modifying integrative approaches
class CombiningSelector(PriorKnowledgeSelector):
"""Super class for prior knoweldge approaches that use a knowledge base AND combine it with any kind of selector, e.g. a traditional approach.
Inherit from this class if you want to implement a feature selector that requires both a knowledge base and another selector, e.g. because it combines information from both.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, name, knowledgebase, tradApproach):
self.tradSelector = tradApproach
self.tradSelector.disableLogFlush()
super().__init__(name, knowledgebase)
self.tradSelector.setTimeLogs(self.timeLogs)
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method as desired when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getName(self):
"""Returns the full name (including applied knowledge base and feature selector) of this selector.
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def getExternalGenes(self):
"""Gets all genes related to the provided search terms from the knowledge base.
:returns: list of gene names.
:rtype: list of str
"""
start = time.time()
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Getting External Genes")
return externalGenes
class NetworkSelector(PriorKnowledgeSelector):
"""Abstract.
    Inherit from this class if you want to implement a new network approach that actually conducts feature EXTRACTION, i.e. maps the original data set onto pathway/subnetwork features.
Instead of :meth:`FeatureSelector.selectFeatures` implement :meth:`NetworkSelector.selectPathways` when inheriting from this class.
Instances of :class:`NetworkSelector` and inheriting classes also require a :class:`PathwayMapper` object that transfers the dataset to the new feature space.
Custom implementations thus need to implement a) a selection strategy to select pathways and b) a mapping strategy to compute new feature values for the selected pathways.
:param featureMapper: feature mapping object that transfers the feature space.
:type featureMapper: :class:`FeatureMapper` or inheriting class
"""
def __init__(self, name, knowledgebase, featuremapper):
self.featureMapper = featuremapper
super().__init__(name, knowledgebase)
@abc.abstractmethod
def selectPathways(self, pathways):
"""Selects the pathways that will become the new features of the data set.
        Implement this method (instead of :meth:`FeatureSelector.selectFeatures`) when inheriting from this class.
:param pathways: dict of pathways (pathway names as keys) to select from.
:type pathways: dict
:returns: pathway ranking as dataframe
:rtype: :class:`pandas.DataFrame`
"""
pass
def writeMappedFile(self, mapped_data, fileprefix):
"""Writes the mapped dataset with new feature values to the same directory as the original file is located (it will be automatically processed then).
:param mapped_data: dataframe containing the dataset with mapped feature space.
:type mapped_data: :class:`pandas.DataFrame`
:param fileprefix: prefix of the file name, e.g. the directory path
:type fileprefix: str
:return: absolute path of the file name to store the mapped data set.
:rtype: str
"""
mapped_filepath = fileprefix + "_" + self.getName() + ".csv"
mapped_data.to_csv(mapped_filepath)
return mapped_filepath
def getName(self):
"""Gets the selector name (including the knowledge base).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
def filterPathways(self, pathways):
filtered_pathways = {}
for pathwayName in pathways:
genes = pathways[pathwayName].nodes_by_label.keys()
#check if there is an overlap between the pathway and data set genes
existingGenes = list(set(self.getFeatures()) & set(genes))
if len(existingGenes) > 0:
filtered_pathways[pathwayName] = pathways[pathwayName]
else:
utils.logWarning("WARNING: No genes of pathway " + pathwayName + " found in dataset. Pathway will not be considered")
return filtered_pathways
def selectFeatures(self):
"""Instead of selecting existing features, instances of :class:`NetworkSelector` select pathways or submodules as features.
For that, it first queries its knowledge base for pathways.
It then selects the top k pathways (strategy to be implemented in :meth:`NetworkSelector.selectPathways`) and subsequently maps the dataset to its new feature space.
The mapping will be conducted by an object of :class:`PathwayMapper` or inheriting classes.
If a second dataset for cross-validation is available, the feature space of this dataset will also be transformed.
:returns: absolute path to the pathway ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
overallstart = time.time()
pathways = self.knowledgebase.getRelevantPathways(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, end, "Get Pathways")
#filter pathways to only those that contain at least one gene from the data set
pathways = self.filterPathways(pathways)
start = time.time()
pathwayRanking = self.selectPathways(pathways)
outputFile = self.output + self.getName() + ".csv"
self.writeRankingToFile(pathwayRanking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Pathway Selection")
pathwayNames = pathwayRanking["attributeName"]
start = time.time()
mapped_data = self.featureMapper.mapFeatures(self.getData(), pathways)
fileprefix = os.path.splitext(self.input)[0]
mapped_filepath = self.writeMappedFile(mapped_data, fileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Mapping")
#if crossvalidation is enabled, we also have to map the crossvalidation file
if (utils.getConfigBoolean("Evaluation", "enableCrossEvaluation")):
start = time.time()
#we need to get the cross validation file that had been moved into the intermediate folder
crossValidationPath = utils.getConfigValue("General", "crossVal_preprocessing") + "ready/"
crossValidationFile = utils.getConfigValue("Evaluation", "crossEvaluationData")
crossValFilename = os.path.basename(crossValidationFile)
crossValFilepath = crossValidationPath + crossValFilename
crossValData = pd.read_csv(crossValFilepath, index_col=0)
mapped_crossValData = self.featureMapper.mapFeatures(crossValData, pathways)
crossvalFileprefix = os.path.splitext(crossValFilepath)[0]
crossval_mapped_filepath = self.writeMappedFile(mapped_crossValData, crossvalFileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "CrossValidation Feature Mapping")
overallend = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, overallend, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
############################### FILTER ###############################
class RandomSelector(FeatureSelector):
"""Baseline Selector: Randomly selects any features.
"""
def __init__(self):
super().__init__("Random")
def selectFeatures(self):
"""Randomly select any features from the feature space.
Assigns a score of 0.0 to every feature
:returns: absolute path to the ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outFilename = self.output + filename
#randomly pick any features
with open(self.input, 'r') as infile:
header = infile.readline().rstrip().split(",")
max_index = len(header)
min_index = 2
shuffled_indices = random.sample(range(min_index, max_index), max_index - 2)
with open(outFilename, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
for i in shuffled_indices:
line = "\"" + header[i] + "\"\t\"0.0000\"\n"
outfile.write(line)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outFilename
class AnovaSelector(PythonSelector):
"""Runs ANOVA feature selection using scikit-learn implementation
"""
def __init__(self):
super().__init__("ANOVA")
def runSelector(self, data, labels):
"""Runs the ANOVA feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
#setting k to "all" returns all features
selector = SelectKBest(f_classif, k="all")
selector.fit_transform(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "ANOVA")
return selector
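# Illustrative sketch (not part of the original module): how the fitted SelectKBest
# object returned above can be turned into an attributeName/score ranking, mirroring
# the prepareOutput pattern used by the variance selector below. Toy data is invented.
def _example_anova_ranking():
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import SelectKBest, f_classif
    X, y = make_classification(n_samples=50, n_features=5, random_state=0)
    data = pd.DataFrame(X, columns=["gene_" + str(i) for i in range(5)])
    selector = SelectKBest(f_classif, k="all").fit(data, y)
    ranking = pd.DataFrame({"attributeName": data.columns, "score": selector.scores_})
    return ranking.sort_values("score", ascending=False)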
class Variance2Selector(PythonSelector):
"""Runs variance-based feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Variance")
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
We need to override this method because the variance selector has no scores_ attribute but variances_ instead.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.variances_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
def runSelector(self, data, labels):
"""Runs the actual variance-based feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
selector = VarianceThreshold()
selector.fit_transform(data)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Variance_p")
return selector
class MRMRSelector(RSelector):
"""Runs maximum Relevance minimum Redundancy (mRMR) feature selection using the mRMRe R implementation: https://cran.r-project.org/web/packages/mRMRe/index.html
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
:param maxFeatures: maximum number of features to select. Currently all features are ranked (maxFeatures=0).
:type maxFeatures: int
"""
def __init__(self):
self.maxFeatures = 0
super().__init__("mRMR")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file, maximum number of features).
:return: list of parameters to use for mRMR execution in R.
:rtype: list of str
"""
params = [self.input, outputFile, str(self.maxFeatures)]
return params
class VarianceSelector(RSelector):
"""Runs variance-based feature selection using R genefilter library.
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
"""
def __init__(self):
super().__init__("VB-FS")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file).
:param outputFile: absolute path to the output file that will contain the ranking.
:type outputFile: str
:return: list of parameters to use for the variance-based feature selection execution in R.
:rtype: list of str
"""
params = [self.input, outputFile]
return params
class InfoGainSelector(JavaSelector):
"""Runs InfoGain feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("InfoGain")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for InfoGain execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "InfoGain"]
return params
class ReliefFSelector(JavaSelector):
"""Runs ReliefF feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("ReliefF")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for InfoGain execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "ReliefF"]
return params
############################### FILTER - COMBINED ###############################
class KbSelector(PriorKnowledgeSelector):
"""Knowledge base selector.
Selects features exclusively based on the information retrieved from a knowledge base.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase`
"""
def __init__(self, knowledgebase):
super().__init__("KBonly", knowledgebase)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
def selectFeatures(self):
"""Does the actual feature selection.
Retrieves association scores for genes from the knowledge base based on the given search terms.
:returns: absolute path to the resulting ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
outputFile = self.output + self.getName() + ".csv"
genes = self.getFeatures()
# assign a minimal default score (0.00001) to all genes
attributeNames = genes
scores = [0.00001] * len(genes)
ranking = pd.DataFrame({"attributeName": attributeNames, "score": scores})
kb_start = time.time()
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
kb_end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, kb_start, kb_end, "Getting External Gene Scores")
# assign association score to all genes in data
updated_ranking = ranking.apply(self.updateScores, axis = 1, newGeneScores = associatedGenes)
#sort by score, with highest on top
updated_ranking = updated_ranking.sort_values("score", ascending=False)
#save final rankings to file
self.writeRankingToFile(updated_ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
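# Illustrative sketch (not part of the original module): the update pattern implemented
# by KbSelector.updateScores above - every gene starts at a minimal default score and is
# overwritten where the knowledge base returned an association score. Gene names and
# scores below are invented for the example.
def _example_kb_score_update():
    import pandas as pd
    ranking = pd.DataFrame({"attributeName": ["TP53", "BRCA1", "GAPDH"], "score": [0.00001] * 3})
    kb_scores = pd.DataFrame({"gene_symbol": ["TP53", "BRCA1"], "score": [0.9, 0.4]})
    def update(entry):
        match = kb_scores.loc[kb_scores["gene_symbol"] == entry["attributeName"], "score"]
        if not match.empty:
            entry["score"] = match.iloc[0]
        return entry
    return ranking.apply(update, axis=1).sort_values("score", ascending=False)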
class KBweightedSelector(CombiningSelector):
"""Selects features based on association scores retrieved from the knowledge base and the relevance score retrieved by the (traditional) approach.
Computes the final score via tradScore * assocScore.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Weighted", knowledgebase, tradApproach)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
def getName(self):
"""Gets the selector name (including the knowledge base and (traditional) selector).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def computeStatisticalRankings(self, intermediateDir):
"""Computes the statistical relevance score of all features using the (traditional) selector.
:param intermediateDir: absolute path to output directory for (traditional) selector (where to write the statistical rankings).
:type intermediateDir: str
:returns: dataframe with statistical ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
self.tradSelector.setParams(self.input, intermediateDir, self.loggingDir)
statsRankings = self.tradSelector.selectFeatures()
#load data frame from file
statisticalRankings = pd.read_csv(statsRankings, index_col = 0, sep = "\t", engine = "python")
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Statistical Ranking")
return statisticalRankings
def computeExternalRankings(self):
"""Computes the association scores for every gene using the knowledge base.
Genes for which no entry could be found receive a default score of 0.000001.
:return: dictionary mapping gene names to their association scores.
:rtype: dict
"""
start = time.time()
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
#assign association score to all genes in data
for gene in geneScores.keys():
# check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:, 0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0, 1]
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return geneScores
def combineRankings(self, externalRankings, statisticalRankings):
"""Combines score rankings from both the knowledge base and the (traditional) selector (kb_score * trad_score) to retrieve a final score for every gene.
:param externalRankings: dataframe with ranking from knowledge base.
:type externalRankings: :class:`pandas.DataFrame`
:param statisticalRankings: dataframe with statistical ranking.
:type statisticalRankings: :class:`pandas.DataFrame`
:returns: dataframe with final combined ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
#just take over the statistical rankings and alter the scores accordingly
combinedRankings = statisticalRankings.copy()
features = statisticalRankings.index
#go through every item and combine by weighting
for feature in features:
#update scores - external rankings only provide feature scores, no indices
if feature in externalRankings.keys():
externalScore = externalRankings[feature]
else:
#if no entry exists, set the score to a minimal value to not zero out the whole product in the end
externalScore = 0.00001
if externalScore == 0:
# if the retrieved score is 0, set it to a minimal value to not zero out the whole product in the end
externalScore = 0.00001
statsScore = statisticalRankings.at[feature, "score"]
combinedRankings.at[feature, "score"] = externalScore * statsScore
#reorder genes based on new score
combinedRankings = combinedRankings.sort_values('score', ascending=False)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Ranking Combination")
return combinedRankings
def selectFeatures(self):
"""Runs the feature selection process.
Retrieves scores from knowledge base and (traditional) selector and combines these to a single score.
:returns: absolute path to final output file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
intermediateDir = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateDir)
outputFile = self.output + self.getName() + ".csv"
#compute gene rankings with traditional approaches
statisticalRankings = self.computeStatisticalRankings(intermediateDir)
#compute gene rankings/associations with external knowledge base
externalRankings = self.computeExternalRankings()
#combine ranking scores
combinedRankings = self.combineRankings(externalRankings, statisticalRankings)
#save final rankings to file
#note: here the gene ids are the index, so write it to file
self.writeRankingToFile(combinedRankings, outputFile, True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
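# Illustrative sketch (not part of the original module): the multiplicative combination
# performed by KBweightedSelector.combineRankings, i.e. final score = knowledge-base score
# * statistical score, with a minimal fallback so a missing or zero knowledge-base entry
# does not zero out the product. All names and numbers are invented for the example.
def _example_weighted_combination():
    import pandas as pd
    statistical = pd.DataFrame({"score": [2.5, 1.0, 3.0]}, index=["TP53", "BRCA1", "GAPDH"])
    external = {"TP53": 0.8, "BRCA1": 0.2}  # no entry for GAPDH
    combined = statistical.copy()
    for gene in combined.index:
        combined.at[gene, "score"] *= external.get(gene, 0.00001) or 0.00001
    return combined.sort_values("score", ascending=False)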
class LassoPenalty(PriorKnowledgeSelector, RSelector):
"""Runs feature selection by invoking xtune R package: https://cran.r-project.org/web/packages/xtune/index.html
xtune is a Lasso selector that uses feature-individual penalty scores.
These penalty scores are retrieved from the knowledge base.
"""
selectFeatures = RSelector.selectFeatures #make sure the right selectFeatures method will be invoked
getName = PriorKnowledgeSelector.getName
def __init__(self, knowledgebase):
super().__init__("LassoPenalty", knowledgebase)
self.scriptName = "FS_LassoPenalty.R"
def createParams(self, outputFile):
"""Sets the parameters the xtune R script requires (input file, output file, filename containing rankings from knowledge base).
:return: list of parameters to use for xtune execution in R.
:rtype: list of str
"""
externalScore_filename = self.computeExternalRankings()
params = [self.input, outputFile, externalScore_filename]
return params
def computeExternalRankings(self):
"""Computes the association scores for each feature based on the scores retrieved from the knowledge base.
Features that could not be found in the knowledge base receive a default score of 0.000001.
:return: absolute path to the file containing the external rankings.
:rtype: str
"""
start = time.time()
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
#assign association score to all genes in data
for gene in geneScores.keys():
#check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:,0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0,1]
#write gene scores to file
scores_filename = intermediateOutput + self.knowledgebase.getName() + "_scores.csv"
scores_df = pd.DataFrame.from_dict(geneScores, orient = "index", columns = ["score"])
scores_df = scores_df.sort_values('score', ascending=False)
scores_df.to_csv(scores_filename, index=True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return scores_filename
############################### WRAPPER ###############################
class WrapperSelector(PythonSelector):
"""Selector implementation for wrapper selectors using scikit-learn.
Currently implements recursive feature elimination (RFE) and sequential forward selection (SFS) strategies,
which can be combined with nearly any classifier offered by scikit-learn, e.g. SVM.
:param selector: scikit-learn selector strategy (currently RFE and SFS)
:param classifier: scikit-learn classifier to use for wrapper selection.
"""
def __init__(self, name):
super().__init__(name)
self.classifier = self.createClassifier()
self.selector = self.createSelector()
def createClassifier(self):
"""Creates a classifier instance (from scikit-learn) to be used during the selection process.
To enable the framework to use a new classifier, extend this method accordingly.
:returns: scikit-learn classifier instance.
"""
classifier = None
classifierType = self.name.split("-")[0]
if "KNN" in classifierType:
#attention: assumes that KNN is followed by a number!
k = int(classifierType.replace("KNN", ""))
classifier = KNeighborsClassifier(n_neighbors=k)
elif classifierType == "SVMl":#SVM with linear kernel
classifier = LinearSVC(max_iter=10000)
#elif classifierType == "SVMp": # SVM with polynomial kernel, but it does not have coef component
# classifier = SVC(kernel="poly")
elif classifierType == "LR":
classifier = LinearRegression()
elif classifierType == "NB":
#use MultinomialNB because we cannot assume feature likelihood to be gaussian by default
classifier = MultinomialNB()
elif classifierType == "ANOVA":
classifier = f_classif
else:
raise BaseException("No suitable classifier found for " + classifierType + ". Choose between KNNx, SVMl (SVM with linear kernel), SVMp (SVM with polynomial kernel), LR, NB, ANOVA.")
return classifier
def createSelector(self):
"""Creates a selector instance that leads the selection process.
Currently, sequential forward selection (SFS) and recursive feature elimination (RFE) are implemented.
Extend this method if you want to add another selection strategy.
:returns: scikit-learn selector instance.
"""
selector = None
k = utils.getConfigValue("Gene Selection - General", "selectKgenes")
selectorType = self.name.split("-")[1]
if selectorType == "RFE":
selector = RFE(self.classifier, int(k))
elif selectorType == "SFS":
selector = SFS(self.classifier,
k_features=int(k),
forward=True,
floating=False,
scoring='accuracy',
verbose = 2,
n_jobs = max(int(utils.getConfigValue("General", "numCores")) // 2, 1), #use half of the available cores (n_jobs must be an integer, at least 1)
cv=0)
return selector
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the particular selector's coefficients.
The coefficients are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the output file that will contain the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
try:
x = selector.estimator_.coef_
except:
try:
x = selector.estimator.coef_
except:
x = selector.est_.coef_
selected_columnIDs = [i for i, rank in enumerate(selector.ranking_) if rank == 1] #indices of the features ranked 1, i.e. selected by RFE
selected_features = data.columns[selected_columnIDs]
ranking["attributeName"] = selected_features
ranking["score"] = x[0]
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
# do gene selection
start = time.time()
#adjust k to not exceed data columns
k = int(utils.getConfigValue("Gene Selection - General", "selectKgenes"))
if k > data.columns.size:
self.selector.n_features_to_select = data.columns.size
self.selector.k_features = data.columns.size
# do data scaling
scaling = StandardScaler().fit(data)
scaled_data = scaling.transform(data)
data = scaled_data
self.selector = self.selector.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Wrapper Selector")
return self.selector
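# Illustrative sketch (not part of the original module): a minimal RFE run with a linear SVM,
# extracting the selected feature names and the coefficients of the final estimator as scores,
# i.e. the same information prepareOutput above writes to the ranking file. Toy data is invented.
def _example_rfe_wrapper():
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import RFE
    from sklearn.svm import LinearSVC
    X, y = make_classification(n_samples=60, n_features=8, random_state=0)
    data = pd.DataFrame(X, columns=["gene_" + str(i) for i in range(8)])
    rfe = RFE(LinearSVC(max_iter=10000), n_features_to_select=3).fit(data, y)
    selected_features = data.columns[rfe.support_]
    scores = rfe.estimator_.coef_[0]
    return pd.DataFrame({"attributeName": selected_features, "score": scores}).sort_values("score", ascending=False)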
class SVMRFESelector(JavaSelector):
"""Executes SVM-RFE with poly-kernel.
Uses an efficient java implementation from WEKA and is thus just a wrapper class to invoke the corresponding jars.
"""
def __init__(self):
super().__init__("SVMpRFE")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for SVM-RFE execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "SVMpRFE"]
return params
############################### EMBEDDED ###############################
class RandomForestSelector(PythonSelector):
"""Selector class that implements RandomForest as provided by scikit-learn.
"""
def __init__(self):
super().__init__("RandomForest")
#override method because there is no scores_ attribute but instead feature_importances_
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the RandomForest selector's feature importances.
These feature importances are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the output file that will contain the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: RandomForest selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.feature_importances_
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual feature selection using scikit-learn's RandomForest classifier.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: scikit-learn RandomForestClassifier that ran the selection.
"""
# setting k to "all" returns all features
start = time.time()
clf = RandomForestClassifier(random_state = 0)
# Train the classifier
clf.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Random Forest")
return clf
class LassoSelector(PythonSelector):
"""Selector class that implements Lasso feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Lasso")
# override method because there is no scores_ attribute but instead coef_
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access Lasso's coefficients.
These coefficients are extracted as feature scores and will be written to the rankings file.
:param outputFile: absolute path of the output file that will contain the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: Lasso selector instance that is used during feature selection.
"""
start = time.time()
ranking = | pd.DataFrame() | pandas.DataFrame |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = | pd.Series([], dtype=np.int8) | pandas.Series |
from statsmodels.compat.python import lrange, long
from statsmodels.compat.pandas import is_numeric_dtype
import datetime
from pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp
from statsmodels.base import data
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.base import datetools
_freq_to_pandas = datetools._freq_to_pandas
_tsa_doc = """
%(model)s
Parameters
----------
%(params)s
dates : array-like of datetime, optional
An array-like object of datetime objects. If a pandas object is given
for endog or exog, it is assumed to have a DateIndex.
freq : str, optional
The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
'M', 'A', or 'Q'. This is optional if dates are given.
%(extra_params)s
%(extra_sections)s
"""
_model_doc = "Timeseries model base class"
_generic_params = base._model_params_doc
_missing_param_doc = base._missing_param_doc
class TimeSeriesModel(base.LikelihoodModel):
__doc__ = _tsa_doc % {"model" : _model_doc, "params" : _generic_params,
"extra_params" : _missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):
super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)
self._init_dates(dates, freq)
def _init_dates(self, dates, freq):
if dates is None:
dates = self.data.row_labels
if dates is not None:
if (not datetools._is_datetime_index(dates) and
isinstance(self.data, data.PandasData)):
try:
if is_numeric_dtype(dates):
raise ValueError
dates = to_datetime(dates)
except ValueError:
raise ValueError("Given a pandas object and the index does "
"not contain dates")
if not freq:
try:
freq = datetools._infer_freq(dates)
except:
raise ValueError("Frequency inference failed. Use `freq` "
"keyword.")
if isinstance(dates[0], datetime.datetime):
dates = DatetimeIndex(dates)
else: # preserve PeriodIndex
dates = | PeriodIndex(dates) | pandas.PeriodIndex |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@theme: extractor
@author: mario
"""
from pyhanlp import *
from datetime import datetime
import sys
import pandas as pd
import os
import numpy as np
from collections import Counter
try:
from .type import *
from .parser import *
from .nlp import WVModel
except:
pass
#Verb,Entity,Rhetoric,Noun,Department,Enterprise,Location,University,Other
import logging
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# get __file__ path:
__MODULEPATH__ = os.path.dirname(os.path.abspath(__file__))
class ExtractorException(Exception):
pass
class EntityExtractor(object):
def dependencyExtract(self):
# sentences should be a list of corpus:
corpus = self.doc.sentences
# call .default_parser on every sentence
relObj = []
for index,each in enumerate(corpus):
_rel = DependencyParser().default_parser(each)
# pre-defined entities like: org, people, loc...
self.verbalExtractor()
self.entityExtractor(_rel)
self.rhetoricExtractor(_rel)
self.nounExtractor(_rel)
self.predefinedExtractor(_rel)
# temporarily storage:
relObj += [_rel]
sys.stdout.write("\r[DependencyParser] process No.{} sentences ...".format(index+1))
sys.stdout.flush()
# here, the index follows the order in sentences:
self.doc.indexedDependency = relObj
# verbal extraction part:
# end. assign
self.genVocabulary()
# remained for the rest:
return self.doc
def genVocabulary(self):
self.doc.vocab = list(set([key for key in self.doc.archive.keys()]))
def embedding(self):
# ./src/wv_model
pass
def verbalExtractor(self):
# v::verb vd::adverbial verb vf::directional verb vg::verbal morpheme vi::intransitive verb
# vl::verbal idiom vn::nominal verb vshi::the verb "是" (to be) x::formal verb vyou::the verb "有" (to have)
_verbalRef = ["v","vd","vf","vg","vi","vl","vn","vx"]
for index,sentence in enumerate(self.doc.indexedSegments):
for token in sentence:
if token[1] in _verbalRef:
self.doc[token[0]] = Verb(token[0],token[1],self.doc.sentences[index])
# end for
# end for
def keywordExtract(self):
# prepare sentence-level corpus:
indexedSegments = self.doc.indexedSegments
indexedSegments = [[word[0] for word in sentence if word[1]!="w"] for sentence in indexedSegments]
indexedSegments = [list(set(sentence)) for sentence in indexedSegments]
# train word2vec model:
wvm = WVModel(self.doc.module_path + "/model/")
# wvm.initModel(wvm.buildSegmentCorpus(self.doc))
wvm.initModel(wvm.buildVocabCorpus(self.doc))
model = wvm.model
def predict_proba(oword, iword):
iword_vec = model[iword]
oword = model.wv.vocab[oword]
oword_l = model.wv.syn0.T
#oword_l = model.trainables.syn1neg.T
dot = np.dot(iword_vec, oword_l)
lprob = -sum(np.logaddexp(0, -dot) + 1*dot)
return lprob
def keywords(s,topn=3):
s = [w for w in s if w in model]
ws = {w:sum([predict_proba(u, w) for u in s]) for w in s} # N*N complexity
return Counter(ws).most_common(n=topn)
# extract:
indexedKeywords = []
for s in indexedSegments:
indexedKeywords += [keywords(s)]
# end
self.doc.keywords = indexedKeywords
return self.doc
def predefinedExtractor(self,relObj):
"""
Pre-defined entity recognition
:param relObj: Dependency Object
Note: noun terms are pre-given and the retrieved one is a subset.
"""
doc = self.doc
df = pd.DataFrame(relObj.default_dependency)
try:
ref = open(doc.module_path+"/src/hanlpNounTermRef.txt","r",encoding="utf8")
except:
ref = open(self.module_path+"/src/hanlpNounTermRef.txt","r",encoding="utf8")
# more efficient if using array other than hashmap
terms = {each.split(",")[0]:each.split(",")[1] for each in ref.read().splitlines()}
ref.close()
for name,tag in zip(df["LEMMA"],df["POSTAG"]):
if tag in terms:
if tag == "nto":
doc[name] = Department(name,relObj.default_text)
elif tag == "ntc":
doc[name] = Enterprise(name,relObj.default_text)
elif tag.startswith("ns"):
doc[name] = Location(name,relObj.default_text)
else:
doc[name] = Other(name,terms[tag],relObj.default_text)
# end
self.doc = doc
def rhetoricExtractor(self,relObj):
doc = self.doc
""" Rhetoric rule: 形容词和副词
:Caution: 在这里没有对修辞进行区分,存在表示不同情感和方向的修辞
"""
# these are relatively small batches
_adjRef = ["a","ad","an","ag","al"]
_advRef = ["d"]
df = pd.DataFrame(relObj.default_dependency)
# basically, rhetoric corresponds to the "定中关系" (attributive modifier) relation in the dependency parse
duplicates = pd.DataFrame()
for each in (_adjRef+_advRef):
adj = pd.DataFrame()
sub = df[df["POSTAG"] == each]
if not sub.empty:
for i in range(len(sub)):
src = sub.iloc[i,]["LEMMA"]
srctype = sub.iloc[i,]["POSTAG"]
q = relObj.query_by_word(word=src,depth=1,direction="upward",ID=sub.iloc[i,]["ID"])[0]
# remove the symbols
if "w" in q["POSTAG"].tolist():
q = q.drop(index=q[q["POSTAG"]=="w"].index)
if not duplicates.empty:
if sub.iloc[i,]["ID"] not in duplicates["ID"].tolist():
if len(q) > 1:
q = q.drop(index=q[q["LEMMA"]==src].index)
tar = "".join(q["LEMMA"].tolist())
doc[src] = Rhetoric(src=src,tar=tar,srcType=srctype,sentence=relObj.default_text)
else:
if len(q) > 1:
q = q.drop(index=q[q["LEMMA"]==src].index)
tar = "".join(q["LEMMA"].tolist())
doc[src] = Rhetoric(src=src,tar=tar,srcType=srctype,sentence=relObj.default_text)
# end
# end
# end
self.doc = doc
def nounExtractor(self,relObj):
""" Noun extractor depends on dependency objects """
# extract nouns by completion
doc = self.doc
df = pd.DataFrame(relObj.default_dependency)
tagger = lambda x:True if x.startswith("n") else False
subPostag = df[[tagger(each) for each in df["POSTAG"].tolist()]]
if subPostag.empty:
return None
qTree = {} # insert the tree
for i,word in zip(subPostag["ID"].tolist(),subPostag["LEMMA"].tolist()):
keys = qTree.keys()
query = relObj.query_by_word(word,1,"downward",i)
leaf = query[0].ID.tolist()
leaf.remove(i)
if set(leaf).intersection(set(keys)):
# if leaf is contained in qLeaf:
delete = list(set(leaf).intersection(set(keys)))
[qTree.pop(each) for each in delete]
# append to the Tree:
qTree[i] = query[0].ID.tolist()
# end
skip = []
IDer = lambda x:[each-1 for each in x]
treeVec = list(qTree.values())
treeVec.reverse()
for v in treeVec:
if v in skip:
continue
else:
if len(v)>2: # more reasonable word sequence:
seq = list(range(v[0],v[-1]+1,1))
extra = list(set(seq).difference(set(v)))
if list(extra) in treeVec:
skip += [list(extra)]
else:
seq = v
each = df.iloc[IDer(seq)]
each = each[each["POSTAG"]!="w"]
name = "".join(each["LEMMA"].tolist())
doc[name] = Noun(name,relObj.default_text)
self.doc = doc
def timeExtractor(self):
# recommend: regexpr
pass
def entityExtractor(self,relObj):
""" A number of rules: 动宾,并列, 主谓等 """
# df = pd.DataFrame(dp.default_dependency),
# `dependency.default_dependency` for query:
# 动宾关系
doc = self.doc
df = pd.DataFrame(relObj.default_dependency)
df_vob = df[df["DEPREL"] == "动宾关系"]
if df_vob.empty: # no verb-object (动宾关系) relations found
return []
duplicates = | pd.DataFrame() | pandas.DataFrame |
'''
author = '<NAME>'
'''
import history
import pandas as pd
from time import sleep
from config import *
def main():
#recover streamings history
token = history.get_token(username, client_id,
client_secret, redirect_uri, scope)
streamings = history.get_streamings()
print(f'Recovered {len(streamings)} streamings.')
#getting a list of unique tracks in our history
tracks = set([streaming['trackName'] for streaming in streamings])
print(f'Discovered {len(tracks)} unique tracks.')
#getting saved ids for tracks
track_ids = history.get_saved_ids(tracks)
#checking tracks that still miss idd
tracks_missing_idd = len([track for track in tracks if track_ids.get(track) is None])
print(f'There are {tracks_missing_idd} tracks missing ID.')
if tracks_missing_idd > 0:
#using spotify API to recover track ids
#note: this methods works only for tracks.
#podcasts and other items will be ignored.
print('Connecting to Spotify to recover tracks IDs.')
sleep(3)
for track, idd in track_ids.items():
if idd is None:
try:
found_idd = history.get_api_id(track, token)
track_ids[track] = found_idd
print(track, found_idd)
except:
pass
#how many tracks did we identify?
identified_tracks = [track for track in track_ids
if track_ids[track] is not None]
print(f'Successfully recovered the ID of {len(identified_tracks)} tracks.')
#how many items did we fail to identify?
n_tracks_without_id = len(track_ids) - len(identified_tracks)
print(f"Failed to identify {n_tracks_without_id} items. "
"However, some of these may not be tracks (e.g. podcasts).")
#using pandas to save tracks ids (so we don't have to API them in the future)
ids_path = 'output/track_ids.csv'
ids_dataframe = pd.DataFrame.from_dict(track_ids,
orient = 'index')
ids_dataframe.to_csv(ids_path)
print(f'track ids saved to {ids_path}.')
#recovering saved features
track_features = history.get_saved_features(tracks)
tracks_without_features = [track for track in tracks if track_features.get(track) is None]
print(f"There are still {len(tracks_without_features)} tracks without features.")
path = 'output/features.csv'
#connecting to spotify API to retrieve missing features
if len (tracks_without_features):
print('Connecting to Spotify to extract features...')
acquired = 0
for track, idd in track_ids.items():
if idd is not None and track in tracks_without_features:
try:
features = history.get_api_features(idd, token)
track_features[track] = features
if features:
acquired += 1
print(f'Acquired features: {track}. Total: {acquired}')
except:
features = None
tracks_without_features = [track for track in tracks if track_features.get(track) is None]
print(f'Successfully recovered features of {acquired} tracks.')
if len(tracks_without_features):
print(f'Failed to identify {len(tracks_without_features)} items. Some of these may not be tracks.')
#saving features
features_dataframe = pd.DataFrame(track_features).T
features_dataframe.to_csv(path)
print(f'Saved features to {path}.')
#joining features and streamings
print('Adding features to streamings...')
streamings_with_features = []
for streaming in streamings:
track = streaming['trackName']
features = track_features[track]
if features:
streamings_with_features.append({'name': track, **streaming, **features})
print(f'Added features to {len(streamings_with_features)} streamings.')
print('Saving streamings...')
df_final = | pd.DataFrame(streamings_with_features) | pandas.DataFrame |
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.utils import resample
# depent on tensorflow 1.14
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.regularizers import l1
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# privacy package
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
# set random seed
np.random.seed(19122)
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
class DataGenerator(object):
"""
Load and preprocess data: filter NA, binarize phenotype, balance sample, one hot encoding.
"""
def __init__(self, genotype_file, phenotype_file, shuffle=True):
super(DataGenerator, self).__init__()
self.genotype_file = genotype_file
self.phenotype_file = phenotype_file
self.shuffle = shuffle
# preprocess
self._load_data()
self._filter_na_phenotype()
self._binarize_phenotype()
self._balance_sample()
self._one_hot_encode()
def _load_data(self):
self.genotype = | pd.read_csv(genotype_file, sep='\t', index_col=0) | pandas.read_csv |
import pytest
import numpy as np
import pandas as pd
import requests as req
import gzip
from pyliftover import LiftOver
import sys
sys.path.append("..")
from bin.read_scorefile import *
def get_timeout(url):
""" Get a remote file with timeout """
try:
return req.get(url, timeout = 5)
except (req.exceptions.ConnectionError, req.Timeout):
return []
@pytest.fixture
def db():
''' Download reference database from gitlab '''
database = get_timeout('https://gitlab.ebi.ac.uk/nebfield/test-datasets/-/raw/master/pgsc_calc/reference_data/pgsc_calc_ref.sqlar')
if not database:
pytest.skip("Couldn't get file from EBI FTP")
else:
with open('db.sqlar', 'wb') as f:
f.write(database.content)
yield 'db.sqlar'
os.remove('db.sqlar')
@pytest.fixture
def chain_files(db):
''' Stage chain files from reference database in working directory '''
os.system('sqlite3 db.sqlar -Ax hg19ToHg38.over.chain.gz hg38ToHg19.over.chain.gz')
yield ['hg19ToHg38.over.chain.gz', 'hg38ToHg19.over.chain.gz']
os.remove('hg38ToHg19.over.chain.gz')
os.remove('hg19ToHg38.over.chain.gz')
@pytest.fixture
def valid_chrom():
""" Valid chromosomes should be 1 - 22, X, Y, and MT.
Validity is not checked or enforced """
return '1'
@pytest.fixture
def valid_pos():
""" Valid positions are integers. Invalid integers are dropped. """
return 1234
@pytest.fixture
def annotated_chrom():
''' LiftOver sometimes returns annotated chromosomes '''
return "22_annotations_that_are_annoying"
@pytest.fixture
def accession():
''' A placeholder PGS Catalog accession '''
return "PGS000802"
@pytest.fixture
def hg38_coords():
''' A dataframe of random variants, pos in GRCh38 '''
d = {'rsid' : ['rs11903757', 'rs6061231'], 'chr_name': ['2', '20'], 'chr_position': [191722478, 62381861] }
return pd.DataFrame(d)
@pytest.fixture
def hg38_to_hg19_coords(hg38_coords):
''' A dataframe containing known good coordinates in GRCh37, from dbSNP '''
d = {'lifted_chr': ['2', '20'], 'lifted_pos': [192587204, 60956917], 'liftover': [True, True] }
return (hg38_coords.join(pd.DataFrame(d), how = 'outer')
.astype({'liftover': bool}))
@pytest.fixture
def hg19_unique_coords():
''' A dataframe of coordinates that are deleted in hg38 and won't map '''
d = {'chr_name': [22, 22, 22], 'chr_position': [22561610, 23412058, 28016883]}
return pd.DataFrame(d)
@pytest.fixture
def hg38():
''' The only input the workflow should accept, but equivalent to hg38 '''
return 'GRCh38'
@pytest.fixture
def hg19():
''' The only input the workflow should accept, but equivalent to hg19 '''
return 'GRCh37'
@pytest.fixture
def lo_tohg19(chain_files):
''' pyliftover object reponsible for converting coordinates hg38 -> hg19 '''
return LiftOver('hg38ToHg19.over.chain.gz')
@pytest.fixture
def lo_tohg38(chain_files):
''' pyliftover object reponsible for converting coordinates hg19 -> hg38 '''
return LiftOver('hg19ToHg38.over.chain.gz')
@pytest.fixture
def min_lift():
''' Minimum proportion of variants to successfully remap coordinates '''
return 0.95
@pytest.fixture
def scoring_file_noheader():
''' Fetch a scorefile without genome build data in the metadata header '''
scorefile = get_timeout('https://ftp.ebi.ac.uk/pub/databases/spot/pgs/scores/PGS000802/ScoringFiles/PGS000802.txt.gz')
if not scorefile:
pytest.skip("Couldn't get file from EBI FTP")
else:
with open('PGS000802.txt', 'wb') as f:
f.write(gzip.decompress(scorefile.content))
yield 'PGS000802.txt'
os.remove('PGS000802.txt')
@pytest.fixture
def scoring_file_sex():
""" Fetch a scoring file with X chromosomes """
scorefile = get_timeout('https://ftp.ebi.ac.uk/pub/databases/spot/pgs/scores/PGS000049/ScoringFiles/PGS000049.txt.gz')
if not scorefile:
pytest.skip("Couldn't get file from EBI FTP")
else:
with open('PGS000049.txt', 'wb') as f:
f.write(gzip.decompress(scorefile.content))
yield 'PGS000049.txt'
os.remove('PGS000049.txt')
@pytest.fixture
def pgs001229():
scorefile = get_timeout('https://ftp.ebi.ac.uk/pub/databases/spot/pgs/scores/PGS001229/ScoringFiles/PGS001229.txt.gz')
if not scorefile:
pytest.skip("Couldn't get file from EBI FTP")
else:
with open('PGS001229.txt', 'wb') as f:
f.write(gzip.decompress(scorefile.content))
yield 'PGS001229.txt'
os.remove('PGS001229.txt')
@pytest.fixture
def scoring_file_header():
''' Fetch a scorefile with genome build data in the metadata header '''
scorefile = get_timeout('https://ftp.ebi.ac.uk/pub/databases/spot/pgs/scores/PGS000777/ScoringFiles/PGS000777.txt.gz')
if not scorefile:
pytest.skip("Couldn't get file from EBI FTP")
else:
with open('PGS000777.txt', 'wb') as f:
f.write(gzip.decompress(scorefile.content))
yield 'PGS000777.txt'
os.remove('PGS000777.txt')
@pytest.fixture
def scoring_file_noEA(scoring_file_noheader):
''' A scoring file (path) with no other allele '''
f = pd.read_table(scoring_file_noheader, comment = '#')
f.drop(['effect_allele'], inplace = True, axis = 1)
f.to_csv('no_ea.txt', sep = '\t', index = False)
yield 'no_ea.txt'
os.remove('no_ea.txt')
@pytest.fixture
def scoring_file_noOA(scoring_file_noheader):
''' A scoring file (path) with no other allele '''
f = pd.read_table(scoring_file_noheader, comment = '#')
f.drop(['other_allele'], inplace = True, axis = 1)
f.to_csv('no_oa.txt', sep = '\t', index = False)
yield 'no_oa.txt'
os.remove('no_oa.txt')
@pytest.fixture
def out_scorefile():
yield 'out.txt'
os.remove('out.txt')
@pytest.fixture
def multi_et_scorefile_df(scoring_file_noheader):
''' Scorefile dataframe with multiple effect types '''
return pd.read_table(scoring_file_noheader, comment = '#')
@pytest.fixture
def bad_multi_et_scorefile_df(multi_et_scorefile_df):
''' Scorefile dataframe with bad (mixed) effect types '''
return multi_et_scorefile_df.assign(is_dominant = True)
@pytest.fixture
def good_score_df():
''' Scorefile dataframe with no effect type '''
d = {'chr_name': [22], 'chr_position': [22561610], 'effect_allele': 'A', 'other_allele': 'G', 'effect_weight': 1}
return pd.DataFrame(d)
@pytest.fixture
def duplicate_score_df(good_score_df):
''' Bad scorefile dataframe with duplicate variants '''
return | pd.concat([good_score_df, good_score_df]) | pandas.concat |
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.DataFrame()
data = | pd.read_csv(path) | pandas.read_csv |
import pandas as pd
import evaluation
import pytest
def test_labels() -> None:
labels = pd.DataFrame.from_dict({'label': ['high', 'medium', 'low'], 'url': ['a', 'b', 'c']})
predictions = pd.DataFrame.from_dict({'prediction': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']})
result = evaluation.calc_error_metrics(labels, predictions)
assert 2/3 == pytest.approx(result[1])
def test_convert_label() -> None:
labels = pd.DataFrame.from_dict({'label': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']})
expected = labels.copy(deep=True)
expected['is_high'] = [True, False, False]
result = evaluation.convert_labels_to_booleans(labels, 'high')
assert expected.equals(result)
def test_float() -> None:
labels = pd.DataFrame.from_dict({'label': ['high', 'medium', 'medium'], 'url': ['a', 'b', 'c']})
predictions = pd.DataFrame.from_dict({'prediction': [0.8, 0.7, 0.2], 'url': ['a', 'b', 'c']})
result = evaluation.calc_error_metrics(labels, predictions, 'high')
assert result[1] == 1
def test_videos() -> None:
labels = pd.read_csv('video/youtube.csv')
predictions = | pd.read_csv('test/video/youtube-test.csv') | pandas.read_csv |
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import scatter_matrix
import yfinance as yf
#import talib
#%matplotlib inline
start = '2017-02-19'
end = '2022-2-19'
sp500 = yf.download('^GSPC', start, end)
# Moving Averages https://www.analyticsvidhya.com/blog/2021/07/stock-prices-analysis-with-python/#h2_5
def MA(data_frame, days):
name = 'MA'+str(days)
data_frame[name] = data_frame['Close'].rolling(days).mean()
return data_frame
# RSI https://wire.insiderfinance.io/calculate-rsi-with-python-and-yahoo-finance-c8fb78b1c199
def RSI(data, window = 14, adjust = False):
delta = data['Close'].diff(1).dropna()
loss = delta.copy()
gains = delta.copy()
gains[gains < 0] = 0
loss[loss > 0] = 0
gain_ewm = gains.ewm(com = window - 1, adjust = adjust).mean()
loss_ewm = abs(loss.ewm(com = window - 1, adjust = adjust).mean())
RS = gain_ewm / loss_ewm
RSI = 100 - 100/ (1 + RS)
return RSI
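# Illustrative usage sketch (not part of the original script): computing the RSI defined above
# on a small synthetic price series; with the real data one would pass the yfinance dataframe,
# e.g. RSI(sp500). The synthetic prices below are invented for the example.
def _example_rsi_usage():
    prices = pd.DataFrame({"Close": np.linspace(100, 120, 40) + np.random.RandomState(0).normal(0, 1, 40)})
    rsi = RSI(prices, window=14)
    # common reading: values above ~70 suggest overbought, below ~30 oversold
    return rsi.tail()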
reversed_df = sp500.iloc[::-1]
#sp500['RSI'] = talib.RSI(reversed_df['Close'], 14)
locator = mdates.MonthLocator(interval = 3)
fmt = mdates.DateFormatter('%b')
#KDJ https://github.com/Abhay64/KDJ-Indicator
array_close = np.array(sp500['Close'])
array_high = np.array(sp500['High'])
array_low = np.array(sp500['Low'])
z = 0
y = 0
highest = 0
lowest = 0
kperiods = 13 #kperiods is 14; array indices start at 0, so use 13
array_highest = []
array_lowest = []
for i in range(0, array_high.size - kperiods):
highest = array_high[y]
for j in range(0, kperiods):
if(highest < array_high[y + 1]):
highest = array_high[y + 1]
y = y + 1
# creating list highest of k periods
array_highest.append(highest)
y = y - (kperiods - 1)
for i in range(0, array_low.size - kperiods):
lowest = array_low[z]
for j in range(0, kperiods):
if(lowest > array_low[z + 1]):
lowest = array_low[z + 1]
z = z + 1
# creating list lowest of k periods
array_lowest.append(lowest)
# skip one from starting after each iteration
z = z - (kperiods - 1)
#KDJ (K line, D line, J line)
# K line
Kvalue = []
for i in range(kperiods,array_close.size):
k = ((array_close[i] - array_lowest[i - kperiods]) * 100 / (array_highest[i - kperiods] - array_lowest[i - kperiods]))
Kvalue.append(k)
sp500['K'] = pd.Series(Kvalue, index=sp500.index[kperiods:]) #align with the date index; the first kperiods rows have no K value
# D line
x = 0
# dperiods for calculate d values
dperiods = 3
Dvalue = [None, None]
mean = 0
for i in range(0, len(Kvalue) - dperiods + 1):
sum = 0
for j in range(0, dperiods):
sum = Kvalue[x] + sum
x = x + 1
mean = sum / dperiods
# d values for %d line adding in the list Dvalue
Dvalue.append(mean)
# skip one from starting after each iteration
x = x - (dperiods - 1)
sp500['D'] = pd.Series(Dvalue, index=sp500.index[kperiods:])
# J line
Jvalue = [None, None]
for i in range(0, len(Dvalue) - dperiods + 1):
j = (Dvalue[i + 2] * 3) - (Kvalue[i + 2] * 2)
# j values for %j line
Jvalue.append(j)
sp500['J'] = pd.Series(Jvalue, index=sp500.index[kperiods:])
# SP500
sp500_data = | pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/SP500_7yr_daily.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Main module."""
### Libraries ###
import pandas as pd
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from pandas import Series
import subprocess
import sys
from scipy import interpolate
import math
import argparse
from matplotlib.legend_handler import HandlerLine2D
import seaborn as sns
import xlsxwriter
import datetime
#TEST
__version__ = "0.1.1"
def argument_parser(argv_list=None):
'''Assesses the input arguments and outputs flags to run the different functions according to the user's needs.
Args:
argv_list: List of arguments provided by the user when running the program.
Returns:
flags
'''
#Initialize the argument parser
parser = argparse.ArgumentParser()
#Adding general arguments
parser.add_argument("-e", "--estimations", help="Get only the estimations for every sample", action = "store_true")
parser.add_argument("-f", "--figures", help="Get only the growth curve figures", action = "store_true")
parser.add_argument("-s", "--summary", help="Get only the summary of growth rate estimations",action = "store_true")
parser.add_argument("-it", "--interpolation", help="Get interpolation of growth rate measurements with given od", action = "store_true")
#Visualization arguments
parser.add_argument("-b", "--bioshaker", help="Get one growth rate figure for every individual bioshaker", action = "store_true")
parser.add_argument("-i", "--individual", help="Get one growth rate figure for every individual sample", action = "store_true")
parser.add_argument("-bc", "--bioshakercolor", help="Get one growth rate figure for every species colored by bioshaker", action = "store_true")
#Volume loss related arguments
parser.add_argument("-v", "--volumeloss", help="Volume loss compesation is not computed", action = "store_false")
parser.parse_args()
args = parser.parse_args(argv_list[1:])
#Create flags
if args.estimations == False and args.figures == False and args.summary == False :
flag_all = True
flag_est = False
flag_sum = False
flag_fig = False
flag_ind = args.individual
flag_bioshakercolor = args.bioshakercolor
flag_volume_loss = args.volumeloss
flag_bioshaker = args.bioshaker
flag_interpolation = args.interpolation
elif args.estimations == True or args.figures == True or args.summary == True :
flag_all = False
flag_est = args.estimations
flag_sum = args.summary
flag_fig = args.figures
flag_ind = args.individual
flag_bioshakercolor = args.bioshakercolor
flag_volume_loss = args.volumeloss
flag_bioshaker = args.bioshaker
flag_interpolation = args.interpolation
return flag_all, flag_est, flag_sum, flag_fig, flag_ind, flag_bioshakercolor, args.volumeloss, flag_bioshaker, flag_interpolation
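# Illustrative usage sketch (not part of the original module): the flag tuple returned by
# argument_parser for a typical call. The argv list below mimics sys.argv and is invented.
def _example_argument_parser_usage():
    # request only the estimations and switch off volume loss compensation
    flags = argument_parser(["croissance_wrapper", "--estimations", "--volumeloss"])
    flag_all, flag_est, flag_sum, flag_fig = flags[0], flags[1], flags[2], flags[3]
    return flag_est  # True, while flag_all is False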
# ------ PARSE THE DATA USING THE autoflow_parser LIBRARY ------
def parse_data() :
'''Calls the autoflow_parser and returns a merged xlsx document with all the OD readings combined'''
try :
call = subprocess.call("autoflow_parser",shell = True)
except :
return sys.exit("The data could not be parsed due to some error, check the input documentation")
return call
# ------ DATA LOADING AND VARIABLE SELECTION ------
def read_xlsx(filename = "results.xlsx") : #done
'''Reads .xlsx file, returns a dataframe with relevant variables. The output of the parser is set to be "results.xlsx", the default reads the mentioned file without any additional argument'''
try :
#Open data file
df = pd.read_excel(filename)
except FileNotFoundError :
return sys.exit("Could not find the parsed data file (XLSX extension)")
#Select valuable columns
cols = ["Sample ID", "Measurement", "Measurement type", "Sampling time","Sampling date"] #relevant variables
df = df[cols]
df.columns = df.columns.str.replace(' ', '_') #header spaces replaced with underscore
return df
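# Note: after read_xlsx() the selected headers are underscore-separated, i.e. the
# returned dataframe exposes the columns
# ['Sample_ID', 'Measurement', 'Measurement_type', 'Sampling_time', 'Sampling_date'].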
# ------ SEPARATION OF GROWTH RATE SAMPLE AND VOLUME LOSS SAMPLES ------
def sample_outcome(sample_file, df) : #done
'''Uses an external file containing individual sample purposes, returns two classified dataframes based on sample purposes and labelled by bioshaker.
Args:
sample_file: variable or string containing the name of the file and its extension.
df: dataframe obtained by using the read_xlsx method on the merged xlsx file.
Returns:
df_gr: dataframe containing observations related to the microbial growth rate, labelled by bioshaker.
df_vl: dataframe containing observations related to the volume loss estimation, labelled by bioshaker.
'''
#Open the file containing sample purposes
df_calc = | pd.read_csv(sample_file, sep="\t") | pandas.read_csv |
# coding: utf-8
# # Dataset boolean7: sentences conjoined by or
#
# Generating sentences of the form
#
# - 1) **c is P or d is Q, neither c is P nor d is Q** (contradiction)
#
# - 2) **c is P or d is Q, c is not P and d is not Q** (contradiction)
#
# - 3) **c is P or d is Q, c is not P or d is not Q** (non-contradiction)
#
# - 4) **c is P or d is Q, c (d) is not P (Q)** (non-contradiction)
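# Illustrative outputs of the four templates (the names and predicates below are
# placeholders standing in for entries of name_list and the qualities list):
#
# sentence1: "Alice is kind or Bob is tall"
# sentence2: " neither Alice is kind nor Bob is tall" -> label 1 (contradiction)
# sentence2: " Alice is not kind and Bob is not tall" -> label 1 (contradiction)
# sentence2: " Alice is not kind or Bob is not tall"  -> label 0 (non-contradiction)
# sentence2: " Alice is not kind"                     -> label 0 (non-contradiction)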
# In[1]:
import numpy as np
import pandas as pd
import os
try:
from word_lists import name_list, positive_personality_list
except ImportError:
from contra_qa.text_generation.word_lists import name_list, positive_personality_list # noqa
try:
from word_lists import apparance_list, negative_personality_list
except ImportError:
from contra_qa.text_generation.word_lists import apparance_list, negative_personality_list # noqa
def get_new_item(item_list, src_list):
size = len(src_list)
new_item = src_list[np.random.choice(size)]
while new_item in item_list:
new_i = np.random.choice(size)
new_item = src_list[new_i]
return new_item
qualities = positive_personality_list + \
apparance_list + negative_personality_list
upper_bound = 11000/4
# ### Generating all types of sentences
def boolean7():
# - 1) **c is P or d is Q, neither c is P nor d is Q** (contradiction)
all_sentences_1 = []
for i in range(int(upper_bound)):
person1 = get_new_item([], name_list)
person2 = get_new_item([person1], name_list)
pred1 = get_new_item([], qualities)
pred2 = get_new_item([pred1], qualities)
sentence = "{} is {} or {} is {}, neither {} is {} nor {} is {}".format(person1,
pred1,
person2,
pred2,
person1,
pred1,
person2,
pred2)
all_sentences_1.append(sentence)
all_sentences_1 = [sentence.split(",") + [1]
for sentence in all_sentences_1]
# - 2) **c is P or d is Q, c is not P and d is not Q** (contradiction)
all_sentences_2 = []
for i in range(int(upper_bound)):
person1 = get_new_item([], name_list)
person2 = get_new_item([person1], name_list)
pred1 = get_new_item([], qualities)
pred2 = get_new_item([pred1], qualities)
sentence = "{} is {} or {} is {}, {} is not {} and {} is not {}".format(person1,
pred1,
person2,
pred2,
person1,
pred1,
person2,
pred2)
all_sentences_2.append(sentence)
all_sentences_2 = [sentence.split(",") + [1]
for sentence in all_sentences_2]
# - 3) **c is P or d is Q, c is not P or d is not Q** (non-contradiction)
all_sentences_3 = []
for i in range(int(upper_bound)):
person1 = get_new_item([], name_list)
person2 = get_new_item([person1], name_list)
pred1 = get_new_item([], qualities)
pred2 = get_new_item([pred1], qualities)
sentence = "{} is {} or {} is {}, {} is not {} or {} is not {}".format(person1,
pred1,
person2,
pred2,
person1,
pred1,
person2,
pred2)
all_sentences_3.append(sentence)
all_sentences_3 = [sentence.split(",") + [0]
for sentence in all_sentences_3]
# - 4) **c is P or d is Q, c (d) is not P (Q)** (non-contradiction)
all_sentences_4 = []
for i in range(int(upper_bound)):
person1 = get_new_item([], name_list)
person2 = get_new_item([person1], name_list)
pred1 = get_new_item([], qualities)
pred2 = get_new_item([pred1], qualities)
if i % 2 == 0:
person3 = person1
pred3 = pred1
else:
person3 = person2
pred3 = pred2
sentence = "{} is {} or {} is {}, {} is not {}".format(person1,
pred1,
person2,
pred2,
person3,
pred3)
all_sentences_4.append(sentence)
all_sentences_4 = [sentence.split(",") + [0]
for sentence in all_sentences_4]
np.random.shuffle(all_sentences_1)
np.random.shuffle(all_sentences_2)
np.random.shuffle(all_sentences_3)
np.random.shuffle(all_sentences_4)
size1 = len(all_sentences_1)
size2 = len(all_sentences_2)
size3 = len(all_sentences_3)
size4 = len(all_sentences_4)
all_sentences = all_sentences_1 + all_sentences_2 + \
all_sentences_3 + all_sentences_4
# ### Generating a train DataFrame with 10000 examples and a test DataFrame with 1000 examples
sentence_1 = [triple[0] for triple in all_sentences]
sentence_2 = [triple[1] for triple in all_sentences]
label = [triple[2] for triple in all_sentences]
df_dict = {"sentence1": sentence_1,
"sentence2": sentence_2,
"label": label}
df = | pd.DataFrame(df_dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Time : 2021/12/27 16:34
# @Author : <NAME>
import argparse
import copy
import random
import pandas as pd
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
import wandb
# from models import *
import models
# os.environ['CUDA_VISIBLE_DEVICES'] = '4'
from model_complexity import get_model_infos
def create_model(model, cfg, cfg_mask):
while 0 in cfg:
cfg.remove(0)
# if args.arch.endswith('lp'):
# # model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth)
# newmodel = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth)
# elif args.dataset == 'imagenet':
# newmodel = models.__dict__[args.arch](pretrained=False)
# if len(args.gpu_ids) > 1:
# model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
# else:
newmodel = models.__dict__['vgg'](dataset='cifar100', cfg = cfg)
# for [m0, m1] in zip(model.modules(), newmodel.modules()):
# if isinstance(m0, nn.BatchNorm2d):
# if np.sum(end_mask) == 0:
# continue
# idx1 = np.squeeze(np.argwhere(end_mask))
# if idx1.size == 1:
# idx1 = np.resize(idx1, (1,))
# m1.weight.data = m0.weight.data[idx1.tolist()].clone()
# m1.bias.data = m0.bias.data[idx1.tolist()].clone()
# m1.running_mean = m0.running_mean[idx1.tolist()].clone()
# m1.running_var = m0.running_var[idx1.tolist()].clone()
# layer_id_in_cfg += 1
# start_mask = copy.copy(end_mask)
# if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
# end_mask = cfg_mask[layer_id_in_cfg]
# elif isinstance(m0, nn.Conv2d):
# if np.sum(end_mask) == 0:
# continue
# idx0 = np.squeeze(np.argwhere(start_mask))
# idx1 = np.squeeze(np.argwhere(end_mask))
# # random set for test
# # new_end_mask = np.asarray(end_mask.cpu().numpy())
# # new_end_mask = np.append(new_end_mask[int(len(new_end_mask)/2):], new_end_mask[:int(len(new_end_mask)/2)])
# # idx1 = np.squeeze(np.argwhere(new_end_mask))
#
# # print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
# if idx0.size == 1:
# idx0 = np.resize(idx0, (1,))
# if idx1.size == 1:
# idx1 = np.resize(idx1, (1,))
# w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
# w1 = w1[idx1.tolist(), :, :, :].clone()
# m1.weight.data = w1.clone()
# elif isinstance(m0, nn.Linear):
# idx0 = np.squeeze(np.argwhere(start_mask))
# if idx0.size == 1:
# idx0 = np.resize(idx0, (1,))
# m1.weight.data = m0.weight.data[:, idx0].clone()
# m1.bias.data = m0.bias.data.clone()
layer_id_in_cfg = 0
start_mask = np.ones(3)
end_mask = cfg_mask[layer_id_in_cfg]
parameter_buffer = {}
for m0 in model.modules():
if isinstance(m0, nn.BatchNorm2d):
key = str(layer_id_in_cfg) + 'BatchNorm'
value = []
if np.sum(end_mask) == 0:
pass
else:
idx1 = np.squeeze(np.argwhere(end_mask))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
value.append(m0.weight.data[idx1.tolist()].clone())
value.append(m0.bias.data[idx1.tolist()].clone())
value.append(m0.running_mean[idx1.tolist()].clone())
value.append(m0.running_var[idx1.tolist()].clone())
start_mask = copy.copy(end_mask)
parameter_buffer[key] = value
layer_id_in_cfg += 1
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
key = str(layer_id_in_cfg) + 'Conv'
value = []
if np.sum(end_mask) == 0:
pass
else:
idx0 = np.squeeze(np.argwhere(start_mask))
idx1 = np.squeeze(np.argwhere(end_mask))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
value.append(w1.clone())
parameter_buffer[key] = value
elif isinstance(m0, nn.Linear):
key = str(layer_id_in_cfg) + 'Linear'
value = []
idx0 = np.squeeze(np.argwhere(start_mask))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
value.append(m0.weight.data[:, idx0].clone())
value.append(m0.bias.data.clone())
parameter_buffer[key] = value
layer_id_in_cfg = 0
for m1 in newmodel.modules():
if isinstance(m1, nn.BatchNorm2d):
key = str(layer_id_in_cfg) + 'BatchNorm'
while len(parameter_buffer[key]) == 0:
layer_id_in_cfg += 1
key = str(layer_id_in_cfg) + 'BatchNorm'
m1.weight.data = parameter_buffer[key][0]
m1.bias.data = parameter_buffer[key][1]
m1.running_mean = parameter_buffer[key][2]
m1.running_var = parameter_buffer[key][3]
layer_id_in_cfg += 1
elif isinstance(m1, nn.Conv2d):
key = str(layer_id_in_cfg) + 'Conv'
while len(parameter_buffer[key]) == 0:
layer_id_in_cfg += 1
key = str(layer_id_in_cfg) + 'Conv'
m1.weight.data = parameter_buffer[key][0]
elif isinstance(m1, nn.Linear):
key = str(layer_id_in_cfg) + 'Linear'
m1.weight.data = parameter_buffer[key][0]
m1.bias.data = parameter_buffer[key][1]
pass
return newmodel
def get_batch_jacobian(net, x, target, device):
net.zero_grad()
x.requires_grad_(True)
y = net(x)
y.backward(torch.ones_like(y))
jacob = x.grad.detach()
return jacob, target.detach(), y.detach()
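# Note: the returned jacobian is not consumed anywhere in this script; the point of
# the backward pass is that it fires counting_backward_hook in check_score() below,
# marking every ReLU module as `visited_backwards` before the scoring forward pass.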
def check_score(model, train_loader, sanity_check=False):
newmodel = copy.deepcopy(model)
reset_seed()
newmodel.K = np.zeros((args.test_batch_size, args.test_batch_size))
def counting_forward_hook(module, inp, out):
try:
if not module.visited_backwards:
return
if isinstance(inp, tuple):
inp = inp[0]
inp = inp.view(inp.size(0), -1)
x = (inp > 0).float()
K = x @ x.t()
K2 = (1. - x) @ (1. - x.t())
newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy()
except:
pass
def counting_backward_hook(module, inp, out):
module.visited_backwards = True
for name, module in newmodel.named_modules():
if 'ReLU' in str(type(module)):
# hooks[name] = module.register_forward_hook(counting_hook)
module.register_forward_hook(counting_forward_hook)
module.register_backward_hook(counting_backward_hook)
newmodel = newmodel.to(device)
s = []
for j in range(5):
data_iterator = iter(train_loader)
x, target = next(data_iterator)
if sanity_check:
x = shuffle_data(x)
x2 = torch.clone(x)
x2 = x2.to(device)
x, target = x.to(device), target.to(device)
jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
newmodel(x2.to(device))
s_, ld = np.linalg.slogdet(newmodel.K)
s.append(ld)
score = np.mean(s)
return score
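# Hedged usage sketch: check_score() is a training-free score in the style of the
# NASWOT zero-cost proxy (mean log-determinant of the binary ReLU activation kernel
# over five batches). It relies on the module-level `args.test_batch_size` and
# `device`, which the surrounding script is assumed to define.
def _score_with_sanity_check_sketch(model, train_loader):
    baseline = check_score(model, train_loader)
    shuffled = check_score(model, train_loader, sanity_check=True)  # pixel-shuffled inputs
    return baseline, shuffled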
def check_channel_score(model, train_loader, sanity_check=False):
newmodel = copy.deepcopy(model)
reset_seed()
def counting_forward_hook(module, inp, out):
try:
# if not module.visited_backwards:
# return
if isinstance(inp, tuple):
inp = inp[0]
K_layer = np.zeros((args.test_batch_size, args.test_batch_size))
inp = inp.permute(1, 0, 2, 3)
inp = inp.view(inp.size(0), inp.size(1), -1)
inp = (inp > 0).float()
score_list = []
for i in range(inp.size(0)):
x = inp[i]
K1 = x @ x.t()
K2 = (1. - x) @ (1. - x.t())
K = K1.cpu().numpy() + K2.cpu().numpy()
K_layer += K
# s_, ld = np.linalg.slogdet(K)
s_, ld = np.linalg.slogdet(K/inp.size(2))
score_list.append(ld)
s_, ld = np.linalg.slogdet(K_layer/(inp.size(0)*inp.size(2)))
# s_, ld = np.linalg.slogdet(K_layer)
newmodel.layer_score.append(ld)
newmodel.channel_score.append(score_list)
except Exception as e:
print(e)
def counting_backward_hook(module, inp, out):
module.visited_backwards = True
def counting_backward_hook_ini(module, inp, out):
newmodel.layer_score = []
newmodel.channel_score = []
for name, module in newmodel.named_modules():
if 'ReLU' in str(type(module)):
# hooks[name] = module.register_forward_hook(counting_hook)
module.register_forward_hook(counting_forward_hook)
if name == 'feature.0':
module.register_forward_hook(counting_backward_hook_ini)
newmodel = newmodel.to(device)
s = []
layer_s = []
for j in range(5):
data_iterator = iter(train_loader)
x, target = next(data_iterator)
if sanity_check:
x = shuffle_data(x)
x2 = torch.clone(x)
x2 = x2.to(device)
x, target = x.to(device), target.to(device)
# jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
newmodel(x2.to(device))
s.append(copy.deepcopy(newmodel.channel_score))
layer_s.append(copy.deepcopy(newmodel.layer_score))
layer_s = np.array(layer_s)
layer_s = np.mean(layer_s, axis=0)
channel_score = []
for channel in range(len(s[0])):
tep = []
for j in range(len(s)):
tep.append(s[j][channel])
tep = np.array(tep)
tep = np.mean(tep, axis=0)
channel_score.append(tep)
# s = np.array(s).astype(float)
# # s = np.mean(s, axis=0)
# s = s.transpose()
# tep = np.array(s[0])
return layer_s, channel_score
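# Finer-grained variant of the score above (sketch): check_channel_score() returns
# one aggregate log-det per ReLU layer plus a per-channel breakdown, e.g.
#   layer_scores, channel_scores = check_channel_score(model, train_loader)
#   # layer_scores[i]   -> scalar score of the i-th ReLU layer
#   # channel_scores[i] -> 1-D array with one score per channel of that layer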
def pruning(model):
total = 0
cfg = []
cfg_mask = []
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
total += m.weight.data.shape[0]
bn = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.shape[0]
bn[index:(index+size)] = m.weight.data.abs().clone()
index += size
y, i = torch.sort(bn)
thre_index = int(total * args.percent)
thre = y[thre_index]
# print('Pruning threshold: {}'.format(thre))
mask = torch.zeros(total)
index = 0
for k, m in enumerate(model.modules()):
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.numel()
weight_copy = m.weight.data.abs().clone()
_mask = weight_copy.gt(thre.cuda()).float().cuda()
cfg_mask.append(_mask.clone())
if int(torch.sum(_mask)) > 0:
cfg.append(int(torch.sum(_mask)))
mask[index:(index+size)] = _mask.view(-1)
# print('layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'.format(k, _mask.shape[0], int(torch.sum(_mask))))
index += size
elif isinstance(m, nn.MaxPool2d):
cfg.append('M')
# print('Pre-processing Successful!')
return mask, cfg, cfg_mask
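# Hedged end-to-end sketch (not part of the original script) tying together
# pruning(), create_model() and check_score(); `model`, `train_loader`,
# `args.percent` and `device` are assumed to be set up by the surrounding code.
def _prune_and_score_sketch(model, train_loader):
    mask, cfg, cfg_mask = pruning(model)          # network-slimming style BN-scale threshold
    pruned = create_model(model, cfg, cfg_mask)   # copy surviving channels into a new VGG
    return pruned, check_score(pruned, train_loader)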
def create_cfg(cfg_mask_all, indicator):
form = copy.deepcopy(cfg_mask_all)
while 'M' in form:
form.remove('M')
# np.random.shuffle(mask_all)
cfg_mask = []
end = 0
for i in form:
cfg_mask.append(indicator[end:end + i])
end += i
cfg = []
index = 0
for i in range(len(cfg_mask_all)):
if cfg_mask_all[i] != 'M':
if np.sum(cfg_mask[index]) != 0:
cfg.append(int(np.sum(cfg_mask[index])))
index += 1
else:
cfg.append('M')
return cfg, cfg_mask
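# Worked example (hypothetical shapes): with cfg_mask_all = [4, 'M', 3] and
# indicator = [1, 0, 1, 1, 0, 0, 0], create_cfg() returns
#   cfg      = [3, 'M']                    # the fully-masked 3-channel layer is dropped
#   cfg_mask = [[1, 0, 1, 1], [0, 0, 0]]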
def random_search(cfg_mask_all, percent):
form = copy.deepcopy(cfg_mask_all)
while 'M' in form:
form.remove('M')
total = np.sum(form)
choose_num = int(total * percent)
mask_all = np.append(np.ones(choose_num), np.zeros(total - choose_num))
record_dict = {}
for i in range(len(mask_all)):
record_dict[i] = []
score_test = 0
trail_index = 0
while score_test < 1450:
for i in range(100):
np.random.shuffle(mask_all)
cfg, cfg_mask = create_cfg(cfg_mask_all, mask_all)
model_new = create_model(model, cfg, cfg_mask)
score = check_score(model_new, train_loader)
for i in range(len(mask_all)):
if not mask_all[i]:
record_dict[i].append(score)
average_score = pd.DataFrame([], columns=["position", "score"])
for i in range(len(mask_all)):
info_dict = {
'position':i,
'score':np.max(record_dict[i])
}
average_score = average_score.append(info_dict, ignore_index=True)
average_score = average_score.sort_values(by=['score'], ascending=False)
indexes = average_score['position'][0: int(len(average_score) * percent)]
indexes = indexes.astype(int)
indicator = np.ones(total)
indicator[indexes] = 0
cfg, cfg_mask = create_cfg(cfg_mask_all, indicator)
model_new = create_model(model, cfg, cfg_mask)
score = check_score(model_new, train_loader)
info_dict = {
'index': trail_index,
'cfg': cfg,
'cfg_mask': cfg_mask,
'score': score
}
wandb.log(info_dict)
print('The trial of {}, the score is {:.2f}'.format(trail_index, score))
trail_index += 1
if score > score_test:
score_test = score
np.save('{:.2f}.npy'.format(score_test), info_dict)
def reset_seed(seed=1):
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
def count_channel(model):
cfg_mask_all = []
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
cfg_mask_all.append(m.weight.data.shape[0])
elif isinstance(m, nn.MaxPool2d):
cfg_mask_all.append('M')
form = copy.deepcopy(cfg_mask_all)
while 'M' in cfg_mask_all:
cfg_mask_all.remove('M')
total = np.sum(cfg_mask_all)
return total, form
def shuffle_data(xs):
Size = xs.size()
# e.g. for CIFAR-100 inputs each sample is 3 * 32 * 32; every sample is permuted independently
xs = xs.reshape(Size[0], -1)
for i in range(Size[0]):
xs[i] = xs[i][torch.randperm(xs[i].nelement())]
xs = xs.reshape(Size)
return xs
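# Note: shuffle_data() backs the `sanity_check` flag of check_score() and
# check_channel_score(); it permutes the flattened C*H*W values of every sample
# independently and typically modifies the batch in place (for contiguous inputs),
# so pass a clone if the original ordering is still needed,
# e.g. x_shuffled = shuffle_data(x.clone()).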
def greedy_search_new(model, percent, train_loader):
buffer = 33
total, form = count_channel(model)
channel_num = total
progress_index = 0
indicator = np.ones(total)
while channel_num > total * percent:
# indicator = np.ones(total)
score_dict = pd.DataFrame([], columns=['index', 'score'])
for position in range(total):
if indicator[position]:
indicator_tep = copy.deepcopy(indicator)
indicator_tep[position] = 0
cfg, cfg_mask = create_cfg(form, indicator_tep)
model_new = create_model(model, cfg, cfg_mask)
score = check_score(model_new, train_loader)
info_dict = {
'index': position,
'score': score
}
score_dict = score_dict.append(info_dict, ignore_index=True)
print('{}----{}/{}: score {:.2f}'.format(channel_num, position, total, score))
else:
score = -1
info_dict = {
'index': position,
'score': -1,
}
score_dict = score_dict.append(info_dict, ignore_index=True)
print('{}----{}/{}: score {:.2f}'.format(channel_num, position, total, score))
score_dict = score_dict.sort_values(by=['score'], ascending=False)
indexes = score_dict['index'][0:buffer]
indexes = indexes.astype(int)
indicator[indexes] = 0
cfg, cfg_mask = create_cfg(form, indicator)
channel_num = count_cfg_channel(cfg)
newmodel = create_model(model, cfg, cfg_mask)
score = check_score(newmodel, train_loader)
info_dict = {
'index': progress_index*buffer,
'score': score
}
wandb.log(info_dict)
progress_index += 1
# for i in range(len(cfg_mask)):
# cfg = copy.copy(cfg_mask)
# cfg[i] -= 1
# newmodel = models.__dict__[args.arch](dataset=args.dataset, cfg=cfg)
# score = check_score(newmodel, train_loader)
# score_dict[i] = score
# print(score_dict)
save_dict = {
'state_dict': newmodel.state_dict(),
'cfg': cfg,
'cfg_mask': cfg_mask,
'score': score
}
torch.save(save_dict, '{:.2f}.pth'.format(score))
# np.save('{:.2f}.npy'.format(score), save_dict)
def channel_score_search(model, percent, train_loader):
total, form = count_channel(model)
indicator = np.ones(total)
cfg, cfg_mask = create_cfg(form, indicator)
new_model = copy.copy(model)
for i in range(0, len(cfg_mask)):
# score = check_score(new_model, train_loader)
channel_score = check_channel_score(new_model, train_loader)
channel_score = channel_score[i]
channel_score_rank = copy.deepcopy(channel_score)
channel_score_rank.sort()
thre_index = int(len(channel_score)*percent)
thre_score = channel_score_rank[thre_index-1]
mask = [0 if j <= thre_score else 1 for j in channel_score]
cfg_mask[i] = mask
indicator_tep = []
for j in cfg_mask:
indicator_tep += list(j)
cfg_new, cfg_mask_new = create_cfg(form, indicator_tep)
new_model = create_model(model, cfg_new, cfg_mask_new)
score = check_score(new_model, train_loader)
print(score)
def rate_check(model, percent, train_loader):
xshape = (1, 3, 32, 32)
flops_original, param_original = get_model_infos(model, xshape)
total, form = count_channel(model)
indicator = np.ones(total)
cfg, cfg_mask = create_cfg(form, indicator)
f_list = []
p_list = []
for i in range(len(cfg_mask)):
cfg_mask_new = copy.deepcopy(cfg_mask)
cfg_mask_new[i][0:30] = 0
indicator_new = []
for one in cfg_mask_new:
indicator_new += list(one)
cfg_new, cfg_mask_new = create_cfg(form, indicator_new)
model_new = create_model(model, cfg_new, cfg_mask_new)
flops, param = get_model_infos(model_new, xshape)
flops_rate = (flops_original-flops)/flops_original
param_rate = (param_original-param)/param_original
f_list.append(flops_rate)
p_list.append(param_rate)
print("")
def channel_remove_check(model, train_loader, goal_f_rate, goal_p_rate):
baseline_f_rate = 260292527 / 313772032
baseline_p_rate = 8300726 / 15299748
xshape = (1, 3, 32, 32)
flops_original, param_original = get_model_infos(model, xshape)
score = check_score(model, train_loader, True)
score_layer_original, score_channel_original = check_channel_score(model, train_loader, True)
total, form = count_channel(model)
indicator = np.ones(total)
cfg_original, cfg_mask_original = create_cfg(form, indicator)
cfg_mask = copy.deepcopy(cfg_mask_original)
model_new = copy.deepcopy(model)
for i in range(len(cfg_mask_original)-1):
score_layer, score_channel = check_channel_score(model_new, train_loader, True)
cfg_mask[i] = score_channel[i] != -np.inf
indicator = []
for one in cfg_mask:
indicator += list(one)
cfg, cfg_mask = create_cfg(form, indicator)
model_new = create_model(model, cfg, cfg_mask)
flops, param = get_model_infos(model_new, xshape)
f_rate = flops / flops_original
p_rate = param / param_original
print(f_rate)
index = 0
while f_rate > goal_f_rate:
score_layer, score_channel = check_channel_score(model_new, train_loader)
score_channel[-1] = np.ones(score_channel[-1].shape)*5000
indicator = []
for one in score_channel:
indicator += list(one)
min_index = indicator.index(min(indicator))
indicator = np.ones(len(indicator))
indicator[min_index] = 0
cfg, cfg_mask = create_cfg(cfg, indicator)
model_new = create_model(model_new, cfg, cfg_mask)
flops, param = get_model_infos(model_new, xshape)
f_rate = flops / flops_original
p_rate = param / param_original
print(f_rate)
info_dict = {
"f_rate": f_rate,
"p_rate": p_rate,
"cfg": cfg,
"index": index,
}
# wandb.log(info_dict)
index += 1
score_prune = check_score(model_new, train_loader)
torch.save(model_new, 'shuffle_channel_remove_n_f{:.4f}_p{:.4f}_{:.2f}.pth'.format(f_rate, p_rate, score_prune))
def ratio_cfg(form, ratio):
output = []
for i in range(len(form)-1):
if form[i] != 'M':
form[i] = round(form[i]*ratio)
return form
def layer_wise_pruning(model, train_loader, pruning_rate):
xshape = (1, 3, 32, 32)
flops_original, param_original = get_model_infos(model, xshape)
score = check_score(model, train_loader)
score_layer_original, score_channel_original = check_channel_score(model, train_loader)
total, form = count_channel(model)
cfg_goal = ratio_cfg(copy.deepcopy(form), pruning_rate)
# model_new = models.__dict__['vgg'](dataset='cifar100', cfg=cfg_goal)
# flops, param = get_model_infos(model_new, xshape)
# f_rate = flops / flops_original
# p_rate = param / param_original
# score = check_score(model_new, train_loader)
while 'M' in cfg_goal:
cfg_goal.remove('M')
indicator = np.ones(total)
cfg_original, cfg_mask_original = create_cfg(form, indicator)
cfg_mask = copy.deepcopy(cfg_mask_original)
model_new = copy.deepcopy(model)
for i in range(len(cfg_mask_original)-1):
score_layer, score_channel = check_channel_score(model_new, train_loader)
pruning_num = np.sum(cfg_mask[i])-cfg_goal[i]
ranked_score = copy.deepcopy(score_channel[i])
ranked_score.sort()
thre_score = ranked_score[int(pruning_num)-1]
if thre_score != -np.inf:
cfg_mask[i] = score_channel[i] > thre_score
else:
tep = score_channel[i] == -np.inf
index = np.where(tep == 1)
index = np.random.choice(index[0], int(pruning_num), replace= False)
tep = np.array(cfg_mask[i])
tep[index] = 0
cfg_mask[i] = list(tep)
indicator = []
for one in cfg_mask:
indicator += list(one)
cfg, cfg_mask = create_cfg(form, indicator)
model_new = create_model(model, cfg, cfg_mask)
flops, param = get_model_infos(model_new, xshape)
f_rate = flops / flops_original
p_rate = param / param_original
print(f_rate)
#
# index = 0
# while f_rate > goal_f_rate:
# score_layer, score_channel = check_channel_score(model_new, train_loader)
# score_channel[-1] = np.ones(score_channel[-1].shape)*5000
# indicator = []
# for one in score_channel:
# indicator += list(one)
# min_index = indicator.index(min(indicator))
# indicator = np.ones(len(indicator))
# indicator[min_index] = 0
# cfg, cfg_mask = create_cfg(cfg, indicator)
# model_new = create_model(model_new, cfg, cfg_mask)
# flops, param = get_model_infos(model_new, xshape)
# f_rate = flops / flops_original
# p_rate = param / param_original
# print(f_rate)
# info_dict = {
# "f_rate": f_rate,
# "p_rate": p_rate,
# "cfg": cfg,
# "index": index,
# }
# wandb.log(info_dict)
# index += 1
score_prune = check_score(model_new, train_loader)
torch.save(model_new, 'layer_wise_pruning_rate{}_f{:.4f}_p{:.4f}_{:.2f}.pth'.format(pruning_rate, f_rate, p_rate, score_prune))
def layer_remove_check(model, train_loader):
baseline_f_rate = 260292527/313772032
baseline_p_rate = 8300726/15299748
xshape = (1, 3, 32, 32)
flops_original, param_original = get_model_infos(model, xshape)
score = check_score(model, train_loader)
score_layer_original, score_channel_original = check_channel_score(model, train_loader)
total, form = count_channel(model)
indicator = np.ones(total)
cfg, cfg_mask = create_cfg(form, indicator)
# cut_list = [11,10,8,7,4,5,2,0]
cut_list_all = [10, 0, 11, 7, 8, 2, 4, 5]
cut_list = cut_list_all[:1]
for one in cut_list:
cfg_mask[one] = np.zeros(len(cfg_mask[one]))
# cfg_mask[4] = np.zeros(len(cfg_mask[4]))
indicator_new = []
for one in cfg_mask:
indicator_new += list(one)
cfg_new, cfg_mask_new = create_cfg(form, indicator_new)
model_new = create_model(model, cfg_new, cfg_mask_new)
flops, param = get_model_infos(model_new, xshape)
score_prune = check_score(model_new, train_loader)
f_rate = flops/flops_original
p_rate = param/param_original
score_layer, score_channel = check_channel_score(model_new, train_loader)
# for one in score_channel:
# print(np.sum(one == -np.inf), len(one))
#
# save_dict = {
# 'state_dict': model_new.state_dict(),
# 'cfg': cfg_new,
# 'cfg_mask': cfg_mask_new,
# 'score': score_prune
# }
# torch.save(model_new, '{:.2f}.pth'.format(score_prune))
# model_new = models.__dict__[args.arch](dataset=args.dataset, cfg = cfg_new)
# score_prune = check_score(model_new, train_loader)
# score_layer, score_channel = check_channel_score(model_new, train_loader)
torch.save(model_new, '{}-{:.2f}.pth'.format(len(cut_list),score_prune))
print("")
# f_list = []
# p_list = []
#
# for i in range(len(cfg_mask)):
# cfg_mask_new = copy.deepcopy(cfg_mask)
# cfg_mask_new[i][0:30] = 0
# indicator_new = []
# for one in cfg_mask_new:
# indicator_new += list(one)
# cfg_new, cfg_mask_new = create_cfg(form, indicator_new)
# model_new = create_model(model, cfg_new, cfg_mask_new)
# flops, param = get_model_infos(model_new, xshape)
# flops_rate = (flops_original-flops)/flops_original
# param_rate = (param_original-param)/param_original
# f_list.append(flops_rate)
# p_list.append(param_rate)
def count_cfg_channel(cfg):
form = copy.deepcopy(cfg)
while 'M' in form:
form.remove('M')
channel = np.sum(form)
return channel
def create_base(model, train_loader):
total, form = count_channel(model)
indicator = []
for one in form:
if one != 'M':
tep = np.zeros(one)
tep[0] = 1
indicator.append(tep)
for i in range(len(indicator)):
record = pd.DataFrame([], columns=["index", "score"])
for j in range(len(indicator[i])):
tep = np.zeros(len(indicator[i]))
tep[j] = 1
indicator_tep = copy.deepcopy(indicator)
indicator_tep[i] = tep
indicator_list = []
for k in indicator_tep:
indicator_list += list(k)
indicator_tep = np.array(indicator_list).astype(int)
cfg, cfg_mask = create_cfg(form, indicator_tep)
new_model = create_model(model, cfg, cfg_mask)
score = check_score(new_model, train_loader)
info_dict = {
"index": j,
"score": score
}
record = record.append(info_dict, ignore_index=True)
# print("for the {}-th module, tried {}/{}, the score is {:.2f}".format(i, j, len(indicator[i]), score))
record = record.sort_values(by=['score'], ascending=False)
indexes = record['index'][0]
tep = np.zeros(len(indicator[i]))
tep[int(indexes)] = 1
indicator[i] = tep
print("for the {}-th module, tried {}/{}, the score is {:.2f}".format(i, j, len(indicator[i]), record['score'][0]))
indicator_list = []
for i in indicator:
indicator_list += (list(i))
indicator = np.array(indicator_list)
return indicator
def greedy_search_increase(model, percent, train_loader):
buffer = 11
total, form = count_channel(model)
indicator = create_base(model, train_loader)
left_channels = int(total*percent - np.sum(indicator))
while left_channels > 0:
record = | pd.DataFrame([], columns=["index", "score"]) | pandas.DataFrame |
#!/usr/bin/env python3
"""Generate non-canonical nucleotide probability predictions using signal align output
"""
import os
import itertools
import numpy as np
import pandas as pd
from py3helpers.utils import list_dir, merge_lists
from py3helpers.multiprocess import *
from signalalign.nanoporeRead import NanoporeRead
from signalalign.signalAlignment import SignalAlignment
from signalalign.train.trainModels import read_in_alignment_file
from signalalign.utils.sequenceTools import CustomAmbiguityPositions, AMBIG_BASES
class MarginalizeVariants(object):
def __init__(self, variant_data, variants, read_name):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variants: bases to track probabilities
:param variant_data: variant data
"""
self.read_name = read_name
self.variant_data = variant_data
self.variants = sorted(variants)
self.columns = merge_lists([['read_name', 'contig', 'position', 'strand', 'forward_mapped'],
list(self.variants)])
self.contig = NanoporeRead.bytes_to_string(self.variant_data["contig"][0])
self.position_probs = pd.DataFrame()
self.has_data = False
self.per_read_calls = pd.DataFrame()
self.per_read_columns = merge_lists([['read_name', 'contig', 'strand', "forward_mapped",
"n_sites"], list(self.variants)])
def get_data(self):
"""Calculate the normalized probability of variant for each nucleotide and across the read"""
# final location of per position data and per read data
data = []
per_read_data = []
for read_strand in (b"t", b"c"):
read_strand_specifc_data = self.variant_data[self.variant_data["strand"] == read_strand]
read_strand = read_strand.decode("utf-8")
if len(read_strand_specifc_data) == 0:
continue
for forward_mapped in set(self.variant_data["forward_mapped"]):
mapping_strand = "-"
if forward_mapped == b"forward":
mapping_strand = "+"
strand_specifc_data = read_strand_specifc_data[read_strand_specifc_data["forward_mapped"] ==
forward_mapped]
if len(strand_specifc_data) == 0:
continue
# get positions on strand
positions = set(strand_specifc_data["reference_position"])
n_positions = len(positions)
strand_read_nuc_data = [0] * len(self.variants)
# marginalize probabilities for each position
for pos in positions:
pos_data = strand_specifc_data[strand_specifc_data["reference_position"] == pos]
total_prob = 0
position_nuc_dict = {x: 0.0 for x in self.variants}
# Get total probability for each nucleotide
for nuc in set(pos_data["base"]):
nuc_data = pos_data[pos_data["base"] == nuc]
nuc_prob = sum(nuc_data["posterior_probability"])
total_prob += nuc_prob
position_nuc_dict[NanoporeRead.bytes_to_string(nuc)] = nuc_prob
# normalize probabilities over each position
nuc_data = [0] * len(self.variants)
for nuc in position_nuc_dict.keys():
index = self.variants.index(nuc)
nuc_data[index] = position_nuc_dict[nuc] / total_prob
strand_read_nuc_data[index] += nuc_data[index]
data.append(merge_lists([[self.read_name, self.contig, pos, read_strand, mapping_strand],
nuc_data]))
if n_positions > 0:
per_read_data.append(merge_lists([[self.read_name, self.contig, read_strand, mapping_strand,
n_positions],
[prob / n_positions for prob in strand_read_nuc_data]]))
self.position_probs = | pd.DataFrame(data, columns=self.columns) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/30 11:31
Desc: Stock data - market overview - market summary
Stock data - market overview - trading summary
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
from akshare.utils import demjson
import pandas as pd
import requests
warnings.filterwarnings('ignore')
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - market summary
http://www.szse.cn/market/overview/index.html
:param date: the most recent completed trading day
:type date: str
:return: Shenzhen Stock Exchange - market summary
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = [
'证券类别',
'数量',
'成交金额',
'成交量',
'总股本',
'总市值',
'流通股本',
'流通市值']
temp_df['数量'] = pd.t | o_numeric(temp_df['数量']) | pandas.to_numeric |