import os
import math
import logging
import calendar
import datetime
import pandas as pd
import sqlite3 as sql
from dotenv import load_dotenv
from coc import ClashOfClans
logging.basicConfig(level=logging.INFO)
PATH = os.path.dirname(os.path.abspath(__file__))
def get_last_monday_of_month(year, month):
'''
Find the last Monday of the given month and year.
Parameters
----------
year : int
Year.
month : int
Month.
Returns
-------
retval : datetime.datetime
The datetime.datetime object of the last Monday of the month.
'''
calendar_month = calendar.monthcalendar(year, month)
mondays = [week[0] for week in calendar_month if week[0] > 0]
return datetime.datetime(year, month, mondays[-1])
def get_last_month(year, month):
''' Find last month.
Parameters
----------
year : int
Year.
month : int
Month.
Returns
-------
year : int
Year of last month.
month : int
Month of last month.
'''
first_day_this_month = datetime.datetime(year, month, 1)
last_day_last_month = first_day_this_month - datetime.timedelta(days=1)
return last_day_last_month.year, last_day_last_month.month
def get_next_month(year, month):
''' Find next month.
Parameters
----------
year : int
Year.
month : int
Month.
Returns
-------
year : int
Year of next month.
month : int
Month of next month.
'''
if month == 12:
return year + 1, 1
else:
return year, month + 1
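# Hedged usage sketch (added for illustration; not part of the original script).
# It exercises the three date helpers above with example dates.
def _example_date_helpers():
    # The last Monday of May 2021 falls on the 31st.
    assert get_last_monday_of_month(2021, 5) == datetime.datetime(2021, 5, 31)
    # Rolling backward and forward across a year boundary.
    assert get_last_month(2021, 1) == (2020, 12)
    assert get_next_month(2021, 12) == (2022, 1)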
class LegendsLeagueLeaderboard:
'''
Make a Legends League leaderboard.
Parameters
----------
filename : str
The file the leaderboard is written to.
api_token : str
Clash of Clans API token used to authenticate requests.
'''
dbname = "database.db"
def __init__(self, filename, api_token):
self.filename = filename
self.coc = ClashOfClans(api_token=api_token)
self.player_tags = []
self.qualified_clans = []
def __enter__(self):
self.load_player_tags()
self.load_qualified_clans()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.save_player_tags()
self.save_qualified_clans()
def load_player_tags(self):
''' Load player tags from database.
'''
with sql.connect(os.path.join(PATH, self.dbname)) as con:
try:
data = pd.read_sql("SELECT * FROM player_tags", con=con).set_index('index')
self.player_tags = data.squeeze(axis=1).to_list()
except pd.io.sql.DatabaseError:
self.player_tags = []
def save_player_tags(self):
''' Save player tags into database.
'''
data = pd.Series(self.player_tags)
with sql.connect(os.path.join(PATH, self.dbname)) as con:
data.to_sql("player_tags", con=con, if_exists='replace')
def load_qualified_clans(self):
''' Load qualified clan tags from database.
'''
with sql.connect(os.path.join(PATH, self.dbname)) as con:
try:
data = pd.read_sql("SELECT * FROM qualified_clans", con=con)
# This file contains plots to show data
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt, rcParams
from scipy import stats
PALETTE = ['#59b5e3', '#1aa075']
def tandem_distplot(
real_tandems_fn, count_sim_size_fn, family_name, show_pdf=True, split_axis_auto=True
):
"""Plots the distribution of the tandems by simulation vs the real value"""
# load data
df_real = pd.read_csv(real_tandems_fn, sep="\t")
df_sim = pd.read_csv(count_sim_size_fn, sep="\t")
# filter size
df_real = df_real[df_real["size"] == 2]
df_sim = df_sim[df_sim["size"] == 2]
real_tandems = len(df_real)
sim_tandems = df_sim.groupby("simulation").agg({"n": "sum"})
# this value determines bin size and also where to cut axis if split_axis_auto is True
n_sim_range = np.percentile(sim_tandems.n, 90) - np.percentile(sim_tandems.n, 10)
adjust_value = 10
# guarantees good visualization in the pdf output
bin_step = np.ceil(n_sim_range / (adjust_value * 2.5))
# split the axis in two if the real value is too far away from the simulation distribution
split_axis = split_axis_auto and (real_tandems - max(sim_tandems.n)) / n_sim_range > 1
if split_axis:
# Make a figure with 2 subplots
fig, (ax, ax2) = plt.subplots(
1, 2, sharey=True, figsize=(12, 8), gridspec_kw={"width_ratios": [7, 1]}
)
ax.set_xlim(
adjust_value * np.floor(min(sim_tandems.n) / adjust_value),
adjust_value * np.ceil(max(sim_tandems.n) / adjust_value),
)
ax2_limits = [
adjust_value * np.floor(real_tandems / adjust_value),
adjust_value * (np.floor(real_tandems / adjust_value) + 1),
]
ax2.set_xlim(*ax2_limits)
else:
fig, ax = plt.subplots(figsize=(12, 8))
# this way it is not necessary to use conditionals to know where to plot the real value
ax2 = ax
# plot simulated and real values
ax.hist(
sim_tandems.n,
bins=np.arange(min(sim_tandems.n), max(sim_tandems.n), bin_step),
align="left",
lw=0.1,
edgecolor="w",
color=PALETTE[0],
label="simulated"
)
ax2.axvline(
x=real_tandems,
ymin=0,
ymax=1,
linewidth=1.5,
color=PALETTE[1],
label="observed",
)
# annotations and labels
ax2.text(
real_tandems,
ax.get_ylim()[1] * 0.9,
"observed = {}".format(real_tandems),
ha="center",
bbox=dict(fc="w", ec="0.5", alpha=0.5),
)
ax.set(ylabel="Simulations")
fig.suptitle(
"Distribution of tandems by simulation ({})".format(family_name),
fontsize=rcParams["axes.titlesize"],
y=0.92,
)
fig.text(0.5, 0.04, "Tandems by simulations", ha="center")
if show_pdf:
pdf = stats.norm.pdf(
real_tandems, np.mean(sim_tandems.n), np.std(sim_tandems.n)
)
ax2.text(
real_tandems,
ax.get_ylim()[1] * 0.03,
"pdf = {:.2e}".format(pdf),
ha="right",
bbox=dict(fc="w", ec="0.5", alpha=0.7),
)
sns.despine()
if split_axis:
# options to make the two plots look like a single one
ax.spines["right"].set_visible(False)
ax2.spines["left"].set_visible(False)
ax.tick_params(labelright=False)
ax2.xaxis.set_ticks(ax2_limits)
ax2.yaxis.set_ticks_position("none")
return fig
def mutations_byseq_distplot(mutations_by_seq_fn, family_name):
"""Plots the distribution of the mutations by each sequence"""
with open(mutations_by_seq_fn) as f:
for line in f.read().splitlines():
line = line.split("\t")
if line[0] == family_name.replace("*", "_"):
mutations_by_seq = [int(x) for x in line[1].split(",")]
break
# plot
fig, ax = plt.subplots(figsize=(8, 5))
ax.hist(
mutations_by_seq,
bins=np.arange(min(mutations_by_seq), max(mutations_by_seq)),
align="left",
rwidth=0.8,
color=PALETTE[0]
)
ax.set(
title="Distribution of mutations by sequence ({})".format(family_name),
xlabel="Mutations by sequence",
ylabel="Sequences",
)
sns.despine()
return fig
def mutation_probability(mutation_info_fn, family_name):
"""Plots the probability of mutation of each position"""
df = pd.read_csv(mutation_info_fn, sep="\t")
# plot
fig, ax = plt.subplots(figsize=(10, 5))
ax.bar(x=df.position, height=df.mutation_probability, lw=0, width=0.9, color=PALETTE[0])
ax.set(
title="Mutation probabilities by position ({})".format(family_name),
xlabel="Position in sequence",
ylabel="Mutation probability",
)
sns.despine()
return fig
def cluster_size_distribution(
real_tandems_fn, count_size_fn, family_name, n_simulations
):
"""Plots the distribution of the sizes of clusters found (simulated vs real)"""
df_real = pd.read_csv(real_tandems_fn, sep="\t")
df_sim = pd.read_csv(count_size_fn, sep="\t")
# group clusters by size
df_real_bysize = (
df_real.groupby("size")
.agg({"seq_id": "count"})
.reset_index()
.rename(columns={"seq_id": "real"})
)
df_sim_bysize = df_sim.rename(columns={"n": "simulation"})
# get the average of all the simulations
df_sim_bysize["simulation"] = np.round(df_sim_bysize["simulation"] / n_simulations)
# drop the rows with 0 tandems
df_sim_bysize = df_sim_bysize[df_sim_bysize.simulation != 0]
# join data in a single dataframe
df_by_size = pd.merge(df_real_bysize, df_sim_bysize, how="outer").fillna(0)
df_by_size = df_by_size.melt(
id_vars="size",
value_vars=["real", "simulation"],
var_name="target",
value_name="n",
)
# plot
fig, ax = plt.subplots(figsize=(8, 5))
sns.barplot(data=df_by_size, x="size", y="n", hue="target", ax=ax, palette=PALETTE)
# annotations
for p in ax.patches:
ax.annotate(
int(p.get_height()),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="bottom",
rotation=90,
xytext=(0, 5),
color="gray",
textcoords="offset points",
)
ax.set_ylim([0, max(df_by_size.n) * 1.2])
ax.set(
title="Distribution of mutation clusters by size ({})".format(family_name),
xlabel="Cluster size",
ylabel="Number of clusters",
)
ax.legend()
sns.despine()
return fig
def stop_codon_distribution(
count_size_fn, count_size_stop_fn, family_name, n_simulations
):
"""Plots the distribution of the clusters found (with/without stop codons)"""
df_nostop = pd.read_csv(count_size_fn, sep="\t")
df_stop = pd.read_csv(count_size_stop_fn, sep="\t")
# group clusters by size
df_nostop_bysize = df_nostop.rename(columns={"n": "no stop codon"})
df_nostop_bysize["no stop codon"] = np.round(
df_nostop_bysize["no stop codon"] / n_simulations
)
df_nostop_bysize = df_nostop_bysize[df_nostop_bysize["no stop codon"] != 0]
df_stop_bysize = df_stop.rename(columns={"n": "stop codon"})
df_stop_bysize["stop codon"] = np.round(
df_stop_bysize["stop codon"] / n_simulations
)
df_stop_bysize = df_stop_bysize[df_stop_bysize["stop codon"] != 0]
# join data in a single dataframe
df_by_size = pd.merge(df_nostop_bysize, df_stop_bysize, how="outer").fillna(0)
df_by_size = df_by_size.melt(
id_vars="size",
value_vars=["no stop codon", "stop codon"],
var_name="category",
value_name="n",
)
# plot
fig, ax = plt.subplots(figsize=(8, 5))
sns.barplot(data=df_by_size, x="size", y="n", hue="category", ax=ax, palette=PALETTE)
# annotations
for p in ax.patches:
ax.annotate(
int(p.get_height()),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="bottom",
rotation=90,
xytext=(0, 5),
color="gray",
textcoords="offset points",
)
ax.set_ylim([0, max(df_by_size.n) * 1.2])
ax.set(
title="Distribution of mutation clusters with stop codons ({})".format(
family_name
),
xlabel="Cluster size",
ylabel="Number of clusters",
)
ax.legend()
sns.despine()
return fig
def tandem_heatmap(filename, family_name, target="real", n_simulations=1):
"""Plots a heatmap of all the possible tandem substitutions"""
df = pd.read_csv(filename, sep="\t")
df = df[df["size"] == 2]
stop_codons = "_stop" in filename
# get all combinations of dinucleotides to fill with 0 later
dinucleotides = ["".join(x) for x in product("ACGT", repeat=2)]
dinucleotide_combinations = [
[x, y, 0]
for x in dinucleotides
for y in dinucleotides
if (x[0] != y[0] and x[1] != y[1])
]
dc_df = pd.DataFrame(dinucleotide_combinations, columns=["ref", "alt", "n"])
# group by substitution and count
if target == "real":
grouped = (
df.groupby(["ref", "alt"])
.agg({"seq_id": "count"})
.reset_index()
.rename(columns={"seq_id": "n"})
)
else:
grouped = df.groupby(["ref", "alt"]).agg({"n": "sum"}).reset_index()
# add rows with 0 and make the table
table = (
pd.concat([grouped, dc_df])
import csv
import os
import numpy as np
import pandas as pd
from scipy.special import logit
import sklearn
from sklearn.linear_model import (MultiTaskLassoCV, LogisticRegressionCV,
LinearRegression, LogisticRegression)
pd.options.mode.chained_assignment = None
DIR = os.path.dirname(os.path.realpath(__file__))
DATA = os.path.join(DIR, 'RegularSeasonDetailedResults.csv')
TEAM_DATA = os.path.join(DIR, 'Teams.csv')
CLEAN_DATA_DIR = os.path.join(DIR, 'clean_data')
LSUFFIX = '_'
RSUFFIX = '_opponent'
TRANSFORM_PREFIX = 'transformed_'
if not os.path.isdir(CLEAN_DATA_DIR):
os.mkdir(CLEAN_DATA_DIR)
def get_filename(year):
if year is not None:
return os.path.join(CLEAN_DATA_DIR, '{}.csv'.format(year))
else:
return os.path.join(CLEAN_DATA_DIR, 'all.csv')
class TeamCache(object):
def __init__(self):
self._id_to_team = {}
self._team_to_id = {}
self._loaded = False
def _load(self):
if not self._loaded:
with open(TEAM_DATA, 'r') as buff:
for row in csv.DictReader(buff):
self._id_to_team[int(row['Team_Id'])] = row['Team_Name']
self._team_to_id[row['Team_Name']] = int(row['Team_Id'])
self._loaded = True
def id_to_team(self, id_):
self._load()
return self._id_to_team.get(int(id_))
def team_to_id(self, team):
self._load()
return self._team_to_id.get(team)
def find_team(self, team):
self._load()
if team in self._team_to_id:
return team
matches = [t for t in self._team_to_id if team.lower() in t.lower()]
if matches:
return ', '.join(matches)
return 'No matches found'
def check_teams(self, *teams):
self._load()
for team in teams:
if team not in self._team_to_id:
raise LookupError(self.find_team(team))
TEAM_CACHE = TeamCache()
def data_gen(year):
if year is None:
def row_filter(row):
return True
else:
year = str(year)
def row_filter(row):
return row['Season'] == year
with open(DATA, 'r') as buff:
for idx, row in enumerate(csv.DictReader(buff)):
if row_filter(row):
for letter in ('W', 'L'):
data = {
'game_id': idx,
'won': letter == 'W',
'day_num': int(row['Daynum']),
'season': int(row['Season']),
'num_ot': int(row['Numot']),
'home_game': row['Wloc'] == {'W': 'H', 'L': 'A'}[letter],
'team_name': TEAM_CACHE.id_to_team(row[letter + 'team']),
}
for key, value in row.items():
if key.startswith(letter) and key != 'Wloc':
data[key[1:]] = int(value)
yield data
def rolling_avg(group, col, min_periods=5):
return group[col].shift(1).expanding(min_periods=min_periods).mean()
def rolling_sum(group, col, min_periods=5):
return group[col].shift(1).expanding(min_periods=min_periods).sum()
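# Hedged worked example (illustrative; the team/score values are made up). Because of
# shift(1), each row only sees the average/sum of prior games, never the current one.
def _example_rolling_stats():
    games = pd.DataFrame({'team': ['A'] * 6, 'score': [10, 20, 30, 40, 50, 60]})
    group = games.groupby('team').get_group('A')
    avg = rolling_avg(group, 'score', min_periods=2)  # NaN, NaN, 15.0, 20.0, 25.0, 30.0
    tot = rolling_sum(group, 'score', min_periods=2)  # NaN, NaN, 30.0, 60.0, 100.0, 150.0
    return avg, tot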
def get_df(year):
return pd.DataFrame(list(data_gen(year)))
def gen_features(df, min_periods=5):
avg_features = [
'score',
'won']
sum_features = [
'fga',
'fgm',
]
def transformer(group):
transformed = {'avg_{}'.format(x): rolling_avg(group, x, min_periods) for x in avg_features}
transformed.update(
{'tot_{}'.format(x): rolling_sum(group, x, min_periods) for x in sum_features}
)
transformed.update(group.to_dict())
return pd.DataFrame(transformed)
features = df.groupby(['season', 'team']).apply(transformer).dropna()
features['fg_pct'] = features.tot_fgm / np.maximum(features.tot_fga, 1)
# features['fg3_pct'] = features.tot_fgm3 / np.maximum(features.tot_fga3, 1)
# features['ft_pct'] = features.tot_ftm / np.maximum(features.tot_fta, 1)
return features.reset_index(drop=True)
def get_training_data(features):
win_first = features[features.won].join(features[~features.won].set_index('game_id'),
on='game_id', how='inner',
lsuffix=LSUFFIX, rsuffix=RSUFFIX)
lose_first = features[~features.won].join(features[features.won].set_index('game_id'),
on='game_id', how='inner',
lsuffix=LSUFFIX, rsuffix=RSUFFIX)
return sklearn.utils.shuffle(pd.concat([win_first, lose_first])).reset_index(drop=True)
def get_predict_data(df):
df.loc[:, 'game_id'] = 0
df = pd.DataFrame(df.iloc[[0]])
import pandas as pd
import re
import torchvision
from PIL import Image
import clip
import torch
import os
from torch.utils.data import Dataset, DataLoader
from transformers import CLIPTokenizer, CLIPProcessor
# import pandas as pd
# gtin_mapping=pd.read_csv(os.path.join(os.getcwd(),"dvc-manual/520_gtin_product_name.csv"))
# [gtin_mapping[gtin_mapping["gtin"]==gtin_mapping["gtin"].value_counts().index[1]]["product_name"].iloc[i] for i in range(0,4)]
def cleanhtml(raw_html):
CLEANR = re.compile("<.*?>") # remove html tags
cleantext = re.sub(CLEANR, "", raw_html)
pattern = r"\d*\.\d+" # r'[0-9]' # remove decimal numbers
cleantext = re.sub(pattern, "", cleantext)
pattern = r"[0-9]" # remove any digits
cleantext = re.sub(pattern, "", cleantext)
return cleantext
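# Illustrative example (the input string is an assumption, not real product data):
# cleanhtml strips HTML tags first, then decimal numbers, then any remaining digits.
def _example_cleanhtml():
    return cleanhtml("<b>Milk 2.5% fat, 500ml</b>")  # -> "Milk % fat, ml"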
def get_mapping():
gtin_mapping = pd.read_csv("/home/jupyter/dvc-manual/gtin_attr.csv")
import os
import math
import datetime
import itertools
from multiprocessing import Pool
import pandas as pd
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.integrate
from sklearn.metrics import mean_squared_error
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import holoviews as hv
# bokeh.io.output_notebook()
hv.extension('bokeh')
import git
import sys
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
sys.path.insert(1, f"{homedir}" + '/models/data_processing')
import loader
death_time = 14
###########################################################
def test_sklearn(end, death_metric="deaths"):
from sklearn import gaussian_process
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, WhiteKernel, ConstantKernel
counties_dates = []
counties_death_errors = []
counties_fips = []
# us = process_data("/data/us/covid/nyt_us_counties_daily.csv", "/data/us/demographics/county_populations.csv")
us = loader.load_data("/models/gaussian/us_training_data.csv")
policies = loader.load_data("/data/us/other/policies.csv")
fips_key = loader.load_data("/data/us/processing_data/fips_key.csv", encoding="latin-1")
# fips_list = fips_key["FIPS"][0:10]
fips_list = [36061] #56013,1017
total = len(fips_list)
for index, county in enumerate(fips_list):
print(f"{index+1} / {total}")
county_data = loader.query(us, "fips", county)
county_data['avg_deaths'] = county_data.iloc[:,6].rolling(window=3).mean()
county_data = county_data[2:]
dates = pd.to_datetime(county_data["date"].values)
extrapolate = (end-dates[-1])/np.timedelta64(1, 'D')
print(extrapolate)
# X = np.arange(0, len(county_data)+extrapolate)
X_pred = np.arange(0, len(county_data)+extrapolate).reshape(-1,1)
X_train = np.arange(0, len(county_data)).reshape(-1, 1)
Y_train = county_data[death_metric].values
# kernel = ConstantKernel() + Matern(length_scale=1, nu=3/2) + WhiteKernel(noise_level=1)
# kernel = WhiteKernel(noise_level=1)
# gp = gaussian_process.GaussianProcessRegressor(kernel=kernel)
# gp.fit(X_train, Y_train)
# GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
# kernel=1**2 + Matern(length_scale=2, nu=1.5) + WhiteKernel(noise_level=1),
# n_restarts_optimizer=1, normalize_y=False,
# optimizer='fmin_l_bfgs_b', random_state=None)
# y_pred, sigma = gp.predict(X_pred, return_std=True)
clf = GaussianProcessRegressor(random_state=42, alpha=0.1)
clf.fit(X_train, Y_train)
y_pred, sigma = clf.predict(X_pred, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.figure()
plt.scatter(X_train, Y_train, c='b', label="Daily Deaths")
plt.plot(X_pred, y_pred, label="Prediction")
plt.fill_between(X_pred[:, 0], y_pred - sigma, y_pred + sigma,
alpha=0.5, color='blue')
# plt.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$')
# plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label='Observations')
# plt.plot(x_pred, y_pred, 'b-', label='Prediction')
# plt.fill(np.concatenate([x, x[::-1]]),
# np.concatenate([y_pred - 1.9600 * sigma,
# (y_pred + 1.9600 * sigma)[::-1]]),
# alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.legend(loc='upper left')
plt.show()
###########################################################
def process_data(data_covid, data_population, save=True):
covid = loader.load_data(data_covid)
loader.convert_dates(covid, "date")
population = loader.load_data(data_population)
covid.loc[covid["county"]=='New York City', "fips"]=36061
covid['Population'] = covid.apply(lambda row: loader.query(population, "FIPS", row.fips)['total_pop'], axis=1)
covid.dropna(subset=['fips'], inplace=True)
covid['fips']=covid['fips'].astype(int)
# covid = add_active_cases(covid, "/data/us/covid/JHU_daily_US.csv")
if save:
covid.to_csv(f"{homedir}" + "/models/gaussian/us_training_data.csv")
return covid
def add_active_cases(us, data_active_cases):
active_cases = loader.load_data(data_active_cases)
active_cases['FIPS']=active_cases['FIPS'].astype(int)
loader.convert_dates(active_cases, "Date")
difference = (pd.to_datetime(active_cases['Date'])[0] - pd.to_datetime(us['date'])[0])/np.timedelta64(1, 'D')
active_column = []
end = len(us)-1
for index, row in us.iterrows():
print(f"{index}/{end}")
county = row['fips']
date = row['date_processed']
if date < difference:
active_column.append(-1)
else:
entry = (active_cases[(active_cases.date_processed==date-difference) & (active_cases.FIPS == county)])["Active"].values
if len(entry) != 0:
active_column.append(entry[0])
else:
active_column.append(-1)
us["active_cases"] = active_column
return us
###########################################################
def plot_deaths(res, data, extrapolate=14, boundary=None, death_metric="deaths"):
# res is results from fitting
t = np.arange(0, len(data))
tp = np.arange(0, len(data)+extrapolate)
p = bokeh.plotting.figure(plot_width=1000,
plot_height=600,
title = ' PECAIQR Model',
x_axis_label = 't (days)',
y_axis_label = '# people')
p.circle(t, data[death_metric], color ='black', legend='Real Death')
if boundary is not None:
vline = bokeh.models.Span(location=boundary, dimension='height', line_color='black', line_width=3)
p.renderers.extend([vline])
p.legend.location = 'top_left'
bokeh.io.show(p)
def plot_gp(mu, cov, X, X_train=None, Y_train=None, samples=[], name="figures/test.png"):
X = X.ravel()
mu = mu.ravel()
uncertainty = 1.96 * np.sqrt(np.diag(cov))
plt.fill_between(X, mu + uncertainty, mu - uncertainty, alpha=0.3)
plt.plot(X, mu, label='Mean')
for i, sample in enumerate(samples):
plt.plot(X, sample, lw=1, ls='--', label=f'Sample {i+1}')
if X_train is not None:
plt.scatter(X_train, Y_train, c="black", s=2)
plt.legend()
plt.savefig(name)
###########################################################
def kernel(X1, X2, l=1.0, sigma_f=1.0):
'''
Isotropic squared exponential kernel. Computes
a covariance matrix from points in X1 and X2.
Args:
X1: Array of m points (m x d).
X2: Array of n points (n x d).
Returns:
Covariance matrix (m x n).
'''
sqdist = np.sum(X1**2, 1).reshape(-1, 1) + np.sum(X2**2, 1) - 2 * np.dot(X1, X2.T)
return sigma_f**2 * np.exp(-0.5 / l**2 * sqdist)
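# Hedged sanity check (added for illustration): for this squared exponential kernel the
# diagonal of kernel(X, X) equals sigma_f**2 and off-diagonal values decay with distance.
def _example_kernel():
    X = np.array([[0.0], [1.0], [3.0]])
    K = kernel(X, X, l=1.0, sigma_f=2.0)
    # K is 3x3 with K[i, i] == 4.0, and K[0, 1] > K[0, 2] since |0-1| < |0-3|.
    return K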
def posterior_predictive(X_s, X_train, Y_train, l=1.0, sigma_f=1.0, sigma_y=1e-8, noise=100):
'''
Computes the sufficient statistics of the GP posterior predictive distribution
from m training data X_train and Y_train and n new inputs X_s.
Args:
X_s: New input locations (n x d).
X_train: Training locations (m x d).
Y_train: Training targets (m x 1).
l: Kernel length parameter.
sigma_f: Kernel vertical variation parameter.
sigma_y: Noise parameter.
Returns:
Posterior mean vector (n x d) and covariance matrix (n x n).
'''
K = kernel(X_train, X_train, l, sigma_f) + sigma_y**2 * np.eye(len(X_train))
K_s = kernel(X_train, X_s, l, sigma_f)
K_ss = kernel(X_s, X_s, l, sigma_f) + (noise**2) * np.eye(len(X_s))
K_inv = inv(K)
# Equation (4)
mu_s = K_s.T.dot(K_inv).dot(Y_train)
# Equation (5)
cov_s = K_ss - K_s.T.dot(K_inv).dot(K_s)
return mu_s, cov_s
def calculate_noise(county_data, death_metric):
# find standard deviation away from moving average
# firstnonzero = next((index for index,value in enumerate(county_data[death_metric].values) if value != 0), None)
# actual_deaths = (county_data['deaths'].values)[firstnonzero:]
# moving_deaths = (county_data['avg_deaths'].values)[firstnonzero:]
# residuals = []
# for index in range(1, len(actual_deaths)):
# if moving_deaths[index] > 0:
# residue = actual_deaths[index]/moving_deaths[index]
# residue = residue/(moving_deaths[index])
# residuals.append(residue)
# noise = np.std(residuals)
# noise = noise * np.mean(county_data['avg_deaths'])
# return noise
firstnonzero = next((index for index,value in enumerate(county_data[death_metric].values) if value != 0), None)
actual_deaths = (county_data['deaths'].values)[firstnonzero:]
moving_deaths = (county_data['avg_deaths'].values)[firstnonzero:]
residuals = []
for index in range(1, len(actual_deaths)):
if moving_deaths[index] > 0:
residue = actual_deaths[index]-moving_deaths[index]
residuals.append(residue)
noise = math.sqrt(np.std(residuals))
return noise
def fit(X_train, Y_train, X_pred, noise, params=[6.0, 0.1, 0.2]):
(l, sigma_f, sigma_y) = params
mu_s, cov_s = posterior_predictive(X_pred, X_train, Y_train, l=l, sigma_f=sigma_f, sigma_y=sigma_y, noise=noise)
return mu_s, cov_s
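# Minimal toy sketch (illustrative only; the synthetic data and parameter choices are
# assumptions): fit a GP to a short cumulative-count curve and extrapolate 5 steps.
def _example_fit_toy():
    X_train = np.arange(0, 20).reshape(-1, 1)
    Y_train = np.cumsum(np.random.poisson(2, size=20)).astype(float)
    X_pred = np.arange(0, 25).reshape(-1, 1)
    mu_s, cov_s = fit(X_train, Y_train, X_pred, noise=2.0, params=[6.0, 0.1, 0.2])
    uncertainty = 1.96 * np.sqrt(np.diag(cov_s))
    return mu_s, uncertainty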
###########################################################
def test(end, death_metric="deaths"):
counties_dates = []
counties_death_errors = []
counties_fips = []
# us = process_data("/data/us/covid/nyt_us_counties_daily.csv", "/data/us/demographics/county_populations.csv")
us = loader.load_data("/models/gaussian/us_training_data.csv")
policies = loader.load_data("/data/us/other/policies.csv")
fips_key = loader.load_data("/data/us/processing_data/fips_key.csv", encoding="latin-1")
# fips_list = fips_key["FIPS"]
fips_list = [36061] #56013,1017
total = len(fips_list)
for index, county in enumerate(fips_list):
print(f"{index+1} / {total}")
county_data = loader.query(us, "fips", county)
county_data['avg_deaths'] = county_data.iloc[:,6].rolling(window=3).mean()
county_data = county_data[2:]
dates = pd.to_datetime(county_data["date"].values)
extrapolate = (end-dates[-1])/np.timedelta64(1, 'D')
print(extrapolate)
X_pred = np.arange(0, len(county_data)+extrapolate).reshape(-1,1)
X_train = np.arange(0, len(county_data)).reshape(-1, 1)
Y_train = county_data[death_metric].values
noise = calculate_noise(county_data, death_metric=death_metric)
# Compute mean and covariance of the posterior predictive distribution
# mu_s, cov_s = posterior_predictive(X_pred, X_train, Y_train, sigma_y=noise)
# samples = np.random.multivariate_normal(mu_s.ravel(), cov_s, 3)
# plot_gp(mu_s, cov_s, X_pred, X_train=X_train, Y_train=Y_train, samples=samples)
params = [
(3.0, 0.2, 0.1),
(6.0, 0.2, 0.2),
(6.0, 0.3, 0.2),
(6.0, 0.1, 0.2),
(8.0, 0.2, 0.05),
(10.0, 0.3, 0.1),
]
plt.figure(figsize=(12, 8))
for i, (l, sigma_f, sigma_y) in enumerate(params):
mu_s, cov_s = posterior_predictive(X_pred, X_train, Y_train, l=l, sigma_f=sigma_f, sigma_y=sigma_y, noise=noise)
plt.subplot(3, 2, i + 1)
plt.subplots_adjust()
plt.title(f'l = {l}, sigma_f = {sigma_f}, sigma_y = {sigma_y}')
# plot_gp(mu_s, cov_s, X_pred, X_train=X_train, Y_train=Y_train)
samples = np.random.multivariate_normal(mu_s.ravel(), cov_s, 3)
plot_gp(mu_s, cov_s, X_pred, X_train=X_train, Y_train=Y_train, samples=samples, name=f"figures/{county}_{death_metric}test.png")
plt.show()
def fit_single_county(input_dict):
#put the logic to fit a single county here
#all the data should be in input_dict
us = input_dict["us"]
policies = input_dict["policies"]
county = input_dict["county"]
end = input_dict["end"]
death_metric = input_dict["death_metric"]
county_data = loader.query(us, "fips", county)
county_data['avg_deaths'] = county_data.iloc[:,6].rolling(window=3).mean()
county_data = county_data[2:]
if len(county_data) == 0:
return None
dates = pd.to_datetime(county_data["date"].values)
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from bisect import bisect_left, bisect_right
import itertools
import logging
from typing import Any, Dict, Iterable, Mapping, Sequence, Tuple, Union
import warnings
import numpy as np
import pandas as pd
from .misc import ValIterOrderedDict
from .resample import (
AggregationPolicy,
AlignPolicy,
MissingValuePolicy,
get_gcd_timedelta,
granularity_str_to_seconds,
reindex_df,
to_pd_datetime,
)
logger = logging.getLogger(__name__)
class UnivariateTimeSeries(pd.Series):
"""
Please read the `tutorial <examples/TimeSeries>` before reading this API doc.
This class is a time-indexed ``pd.Series`` which represents a univariate
time series. For the most part, it supports all the same features as
``pd.Series``, with the following key differences to iteration and indexing:
1. Iterating over a `UnivariateTimeSeries` is implemented as
.. code-block:: python
for timestamp, value in univariate:
# do stuff...
where ``timestamp`` is a Unix timestamp, and ``value`` is the
corresponding time series value.
2. Integer index: ``u[i]`` yields the tuple ``(u.time_stamps[i], u.values[i])``
3. Slice index: ``u[i:j:k]`` yields a new
``UnivariateTimeSeries(u.time_stamps[i:j:k], u.values[i:j:k])``
The class also supports the following additional features:
1. ``univariate.time_stamps`` returns the list of Unix timestamps, and
``univariate.values`` returns the list of the time series values. You
may access the ``pd.DatetimeIndex`` directly with ``univariate.index``
(or its ``np.ndarray`` representation with ``univariate.np_time_stamps``),
and the ``np.ndarray`` of values with ``univariate.np_values``.
2. ``univariate.concat(other)`` will concatenate the UnivariateTimeSeries
``other`` to the right end of ``univariate``.
3. ``left, right = univariate.bisect(t)`` will split the univariate at the
given timestamp ``t``.
4. ``window = univariate.window(t0, tf)`` will return the subset of the time
series occurring between timestamps ``t0`` (inclusive) and ``tf``
(non-inclusive)
5. ``series = univariate.to_pd()`` will convert the `UnivariateTimeSeries`
into a regular ``pd.Series`` (for compatibility).
6. ``univariate = UnivariateTimeSeries.from_pd(series)`` uses a time-indexed
``pd.Series`` to create a `UnivariateTimeSeries` object directly.
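A minimal usage sketch based on the features above (timestamps and values are illustrative):
.. code-block:: python
    univariate = UnivariateTimeSeries(time_stamps=None, values=[1.0, 2.0, 3.0, 4.0], freq="1h")
    t0, x0 = univariate[0]              # (first Unix timestamp, first value)
    sub = univariate[1:3]               # a new UnivariateTimeSeries
    left, right = univariate.bisect(univariate.time_stamps[2])
    series = univariate.to_pd()         # back to a plain pd.Series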
.. document special functions
.. automethod:: __getitem__
.. automethod:: __iter__
"""
def __init__(
self,
time_stamps: Union[None, Sequence[Union[int, float]]],
values: Sequence[float],
name: str = None,
freq="1h",
):
"""
:param time_stamps: a sequence of Unix timestamps. You may specify
``None`` if you only have ``values`` with no specific time stamps.
:param values: a sequence of univariate values, where ``values[i]``
occurs at time ``time_stamps[i]``
:param name: the name of the univariate time series
:param freq: if ``time_stamps`` is not provided, the univariate is
assumed to be sampled at frequency ``freq``. ``freq`` may be a
string (e.g. ``"1h"``), timedelta, or ``int``/``float`` (in units
of seconds).
"""
is_pd = isinstance(values, pd.Series)
if name is None and is_pd:
name = values.name
if is_pd and isinstance(values.index, pd.DatetimeIndex):
super().__init__(values, name=name)
else:
if time_stamps is None:
if isinstance(freq, (int, float)):
freq = pd.to_timedelta(freq, unit="s")
else:
freq = pd.to_timedelta(freq)
if is_pd and values.index.dtype in ("int64", "float64"):
index = values.index * freq + pd.to_datetime(0)
else:
index = pd.date_range(start=0, periods=len(values), freq=freq)
else:
index = to_pd_datetime(time_stamps)
super().__init__(np.asarray(values), index=index, name=name, dtype=float)
if len(self) >= 3 and self.index.freq is None:
self.index.freq = pd.infer_freq(self.index)
@property
def np_time_stamps(self):
"""
:rtype: np.ndarray
:return: the ``numpy`` representation of this time series's Unix timestamps
"""
ts = self.index.values.astype("datetime64[ms]").astype(float) / 1000
return ts
@property
def np_values(self):
"""
:rtype: np.ndarray
:return: the ``numpy`` representation of this time series's values
"""
return super().values
@property
def time_stamps(self):
"""
:rtype: List[float]
:return: the list of Unix timestamps for the time series
"""
return self.np_time_stamps.tolist()
@property
def values(self):
"""
:rtype: List[float]
:return: the list of values for the time series.
"""
return self.np_values.tolist()
@property
def t0(self):
"""
:rtype: float
:return: the first timestamp in the univariate time series.
"""
return self.np_time_stamps[0]
@property
def tf(self):
"""
:rtype: float
:return: the final timestamp in the univariate time series.
"""
return self.np_time_stamps[-1]
def is_empty(self):
"""
:rtype: bool
:return: True if the univariate is empty, False if not.
"""
return len(self) == 0
def __iter__(self):
"""
The i'th item in the iterator is the tuple
``(self.time_stamps[i], self.values[i])``.
"""
return itertools.starmap(lambda t, x: (t.item(), x.item()), zip(self.np_time_stamps, self.np_values))
def __getitem__(self, i: Union[int, slice]):
"""
:param i: integer index or slice
:rtype: Union[Tuple[float, float], UnivariateTimeSeries]
:return: ``(self.time_stamps[i], self.values[i])`` if ``i`` is
an integer. ``UnivariateTimeSeries(self.time_series[i], self.values[i])``
if ``i`` is a slice.
"""
if isinstance(i, int):
return self.np_time_stamps[i].item(), self.np_values[i].item()
elif isinstance(i, slice):
return UnivariateTimeSeries.from_pd(self.iloc[i])
else:
raise KeyError(
f"Indexing a `UnivariateTimeSeries` with key {i} of "
f"type {type(i).__name__} is not supported. Try "
f"using loc[] or iloc[] for more complicated "
f"indexing."
)
def __eq__(self, other):
return self.time_stamps == other.time_stamps and (self.np_values == other.np_values).all()
def copy(self, deep=True):
"""
Copies the `UnivariateTimeSeries`. Simply a wrapper around the
``pd.Series.copy()`` method.
"""
return UnivariateTimeSeries.from_pd(super().copy(deep=deep))
def concat(self, other):
"""
Concatenates the `UnivariateTimeSeries` ``other`` to the right of this one.
:param UnivariateTimeSeries other: another `UnivariateTimeSeries`
:rtype: UnivariateTimeSeries
:return: concatenated univariate time series
"""
return UnivariateTimeSeries.from_pd(pd.concat((self, other)))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
class BlandAltman():
def __init__(self,gold_std,new_measure,averaged=False):
# set averaged to True if multiple observations from each participant are averaged together to get one value
import pandas as pd
# Check that inputs are list or pandas series, convert to series if list
if isinstance(gold_std,list) or isinstance(gold_std, (np.ndarray, np.generic) ):
df = pd.DataFrame()
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameAlign:
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
assert af._mgr is float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
)
tm.assert_index_equal(bf.index, Index([]))
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
left, right = float_frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, float_frame.index)
expected = {c: s for c in float_frame.columns}
expected = DataFrame(
expected, index=float_frame.index, columns=float_frame.columns
)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_int(self, int_frame):
# test other non-float types
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
def test_align_mixed_type(self, float_string_frame):
af, bf = float_string_frame.align(
float_string_frame, join="inner", axis=1, method="pad"
)
tm.assert_index_equal(bf.columns, float_string_frame.columns)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
""" Analyze MCMC output - chain length, etc. """
# Built-in libraries
import glob
import os
import pickle
# External libraries
import cartopy
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
#from matplotlib.colors import Normalize
import matplotlib.colors as colors
import numpy as np
import pandas as pd
import pymc
from scipy import stats
from scipy.stats.kde import gaussian_kde
from scipy.stats import norm
from scipy.stats import truncnorm
from scipy.stats import uniform
from scipy.stats import linregress
from scipy.stats import lognorm
#from scipy.optimize import minimize
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem_input as input
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import run_calibration as calibration
#%%
# Paper figures
option_observation_vs_calibration = 0
option_papermcmc_prior_vs_posterior = 0
option_papermcmc_modelparameter_map_and_postvprior = 0
option_metrics_histogram_all = 0
option_metrics_vs_chainlength = 1
option_correlation_scatter = 0
option_regional_priors = 0
option_glacier_mb_vs_params = 0
option_papermcmc_hh2015_map = 0
# Others
option_glacier_mcmc_plots = 0
option_raw_plotchain = 0
option_convertcal2table = 0
option_plot_era_normalizedchange = 0
# Export option
mcmc_output_netcdf_fp_3chain = input.output_filepath + 'cal_opt2_spc_20190815_3chain/'
mcmc_output_netcdf_fp_all = input.output_filepath + 'cal_opt2_spc_20190806/'
hh2015_output_netcdf_fp_all = input.output_filepath + 'cal_opt3/cal_opt3/'
mcmc_output_figures_fp = input.output_filepath + 'figures/'
regions = [13,14,15]
#regions = [13]
cal_datasets = ['shean']
burn=1000
chainlength = 10000
# Bounds (80% bounds --> 90% above/below given threshold)
low_percentile = 10
high_percentile = 90
variables = ['massbal', 'precfactor', 'tempchange', 'ddfsnow']
vn_title_dict = {'massbal':'Mass Balance',
'precfactor':'$\mathregular{k_{p}}$',
'tempchange':'$\mathregular{T_{bias}}$',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_abbreviations_wunits_dict = {
'massbal':'B (m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'$\mathregular{k_{p}}$ (-)',
'tempchange':'$\mathregular{T_{bias}}$ ($\mathregular{^{\circ}C}$)',
'ddfsnow':'$\mathregular{f_{snow}}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)'}
vn_abbreviations_dict = {'massbal':'$\mathregular{B}$',
'precfactor':'$\mathregular{k_{p}}$',
'tempchange':'$\mathregular{T_{bias}}$',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_title_wunits_dict = {'massbal':'Mass Balance (m w.e. $\mathregular{a^{-1}}$)',
'dif_masschange':'$\mathregular{B_{obs} - B_{mod}}$\n(m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'$\mathregular{k_{p}}$ (-)',
'tempchange':'$\mathregular{T_{bias}}$ ($\mathregular{^{\circ}C}$)',
'ddfsnow':'$\mathregular{f_{snow}}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)'}
vn_title_noabbreviations_dict = {'massbal':'Mass Balance',
'precfactor':'Precipitation Factor',
'tempchange':'Temperature Bias',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_label_dict = {'massbal':'Mass Balance (m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'Precipitation Factor (-)',
'tempchange':'Temperature Bias ($\mathregular{^{\circ}C}$)',
'ddfsnow':'f$_{snow}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)',
'dif_masschange':'Mass Balance (Observation - Model, mwea)'}
vn_label_units_dict = {'massbal':'(m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'(-)',
'tempchange':'($\mathregular{^{\circ}}$C)',
'ddfsnow':'(mm w.e. d$^{-1}$ $^\circ$C$^{-1}$)'}
metric_title_dict = {'Gelman-Rubin':'Gelman-Rubin Statistic',
'MC Error': 'Monte Carlo Error',
'Effective N': 'Effective Sample Size'}
metrics = ['Gelman-Rubin', 'MC Error', 'Effective N']
title_dict = {'Amu_Darya': 'Amu Darya',
'Brahmaputra': 'Brahmaputra',
'Ganges': 'Ganges',
'Ili': 'Ili',
'Indus': 'Indus',
'Inner_Tibetan_Plateau': 'Inner TP',
'Inner_Tibetan_Plateau_extended': 'Inner TP ext',
'Irrawaddy': 'Irrawaddy',
'Mekong': 'Mekong',
'Salween': 'Salween',
'Syr_Darya': 'Syr Darya',
'Tarim': 'Tarim',
'Yangtze': 'Yangtze',
'inner_TP': 'Inner TP',
'Karakoram': 'Karakoram',
'Yigong': 'Yigong',
'Yellow': 'Yellow',
'Bhutan': 'Bhutan',
'Everest': 'Everest',
'West Nepal': 'West Nepal',
'Spiti Lahaul': 'Spiti Lahaul',
'tien_shan': 'Tien Shan',
'Pamir': 'Pamir',
'pamir_alai': 'Pamir Alai',
'Kunlun': 'Kunlun',
'Hindu Kush': 'Hindu Kush',
13: 'Central Asia',
14: 'South Asia West',
15: 'South Asia East',
'all': 'HMA',
'Altun Shan':'Altun Shan',
'Central Himalaya':'C Himalaya',
'Central Tien Shan':'C Tien Shan',
'Dzhungarsky Alatau':'Dzhungarsky Alatau',
'Eastern Himalaya':'E Himalaya',
'Eastern Hindu Kush':'E Hindu Kush',
'Eastern Kunlun Shan':'E Kunlun Shan',
'Eastern Pamir':'E Pamir',
'Eastern Tibetan Mountains':'E Tibetan Mtns',
'Eastern Tien Shan':'E Tien Shan',
'Gangdise Mountains':'Gangdise Mtns',
'Hengduan Shan':'Hengduan Shan',
'Karakoram':'Karakoram',
'Northern/Western Tien Shan':'N/W Tien Shan',
'Nyainqentanglha':'Nyainqentanglha',
'Pamir Alay':'Pamir Alay',
'Qilian Shan':'Qilian Shan',
'Tanggula Shan':'Tanggula Shan',
'Tibetan Interior Mountains':'Tibetan Int Mtns',
'Western Himalaya':'W Himalaya',
'Western Kunlun Shan':'W Kunlun Shan',
'Western Pamir':'W Pamir'
}
#colors = ['#387ea0', '#fcb200', '#d20048']
linestyles = ['-', '--', ':']
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv = pd.read_csv(kaab_dict_fn)
kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab_name))
himap_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_bolch.csv'
himap_csv = pd.read_csv(himap_dict_fn)
himap_dict = dict(zip(himap_csv.RGIId, himap_csv.bolch_name))
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
bolch_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/himap_regions/boundary_mountain_regions_hma_v3.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
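# Hedged usage sketch (illustrative): pin the middle of a diverging colormap to a chosen
# value (e.g. zero mass balance) even when vmin and vmax are asymmetric.
def _example_midpoint_normalize():
    norm = MidpointNormalize(vmin=-2, vmax=6, midpoint=0)
    return norm(np.array([-2.0, 0.0, 6.0]))  # -> masked array([0.0, 0.5, 1.0])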
def load_glacierdata_byglacno(glac_no, option_loadhyps_climate=1, option_loadcal_data=1):
""" Load glacier data, climate data, and calibration data for list of glaciers
Parameters
----------
glac_no : list
list of glacier numbers (ex. ['13.0001', '15.00001'])
Returns
-------
main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width, gcm_temp, gcm_prec, gcm_elev, gcm_lr,
cal_data, dates_table
"""
glac_no_byregion = {}
regions = [int(i.split('.')[0]) for i in glac_no]
regions = list(set(regions))
for region in regions:
glac_no_byregion[region] = []
for i in glac_no:
region = i.split('.')[0]
glac_no_only = i.split('.')[1]
glac_no_byregion[int(region)].append(glac_no_only)
for region in regions:
glac_no_byregion[region] = sorted(glac_no_byregion[region])
# EXCEPTION COULD BE ADDED HERE INSTEAD
# Load data for glaciers
dates_table_nospinup = modelsetup.datesmodelrun(startyear=input.startyear, endyear=input.endyear, spinupyears=0)
dates_table = modelsetup.datesmodelrun(startyear=input.startyear, endyear=input.endyear,
spinupyears=input.spinupyears)
count = 0
for region in regions:
count += 1
# ====== GLACIER data =====
if ((region == 13 and len(glac_no_byregion[region]) == 54429) or
(region == 14 and len(glac_no_byregion[region]) == 27988) or
(region == 15 and len(glac_no_byregion[region]) == 13119) ):
main_glac_rgi_region = modelsetup.selectglaciersrgitable(
rgi_regionsO1=[region], rgi_regionsO2 = 'all', rgi_glac_number='all')
else:
main_glac_rgi_region = modelsetup.selectglaciersrgitable(
rgi_regionsO1=[region], rgi_regionsO2 = 'all', rgi_glac_number=glac_no_byregion[region])
# Glacier hypsometry
main_glac_hyps_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.hyps_filepath,input.hyps_filedict, input.hyps_colsdrop)
if option_loadcal_data == 1:
# ===== CALIBRATION DATA =====
cal_data_region = pd.DataFrame()
for dataset in cal_datasets:
cal_subset = class_mbdata.MBData(name=dataset)
cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi_region, main_glac_hyps_region,
dates_table_nospinup)
cal_data_region = cal_data_region.append(cal_subset_data, ignore_index=True)
cal_data_region = cal_data_region.sort_values(['glacno', 't1_idx'])
cal_data_region.reset_index(drop=True, inplace=True)
# ===== OTHER DATA =====
if option_loadhyps_climate == 1:
# Ice thickness [m], average
main_glac_icethickness_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.thickness_filepath, input.thickness_filedict,
input.thickness_colsdrop)
main_glac_hyps_region[main_glac_icethickness_region == 0] = 0
# Width [km], average
main_glac_width_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.width_filepath, input.width_filedict, input.width_colsdrop)
# ===== CLIMATE DATA =====
gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
gcm_temp_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.temp_fn, gcm.temp_vn, main_glac_rgi_region, dates_table_nospinup)
if input.option_ablation != 2 or input.ref_gcm_name not in ['ERA5']:
gcm_tempstd_region = np.zeros(gcm_temp_region.shape)
elif input.ref_gcm_name in ['ERA5']:
gcm_tempstd_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.tempstd_fn, gcm.tempstd_vn, main_glac_rgi_region, dates_table_nospinup)
gcm_prec_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.prec_fn, gcm.prec_vn, main_glac_rgi_region, dates_table_nospinup)
gcm_elev_region = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi_region)
# Lapse rate [K m-1]
gcm_lr_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.lr_fn, gcm.lr_vn, main_glac_rgi_region, dates_table_nospinup)
# ===== APPEND DATASETS =====
if count == 1:
main_glac_rgi = main_glac_rgi_region
if option_loadcal_data == 1:
cal_data = cal_data_region
if option_loadhyps_climate == 1:
main_glac_hyps = main_glac_hyps_region
main_glac_icethickness = main_glac_icethickness_region
main_glac_width = main_glac_width_region
gcm_temp = gcm_temp_region
gcm_tempstd = gcm_tempstd_region
gcm_prec = gcm_prec_region
gcm_elev = gcm_elev_region
gcm_lr = gcm_lr_region
else:
main_glac_rgi = main_glac_rgi.append(main_glac_rgi_region)
if option_loadcal_data == 1:
cal_data = cal_data.append(cal_data_region)
if option_loadhyps_climate == 1:
# If more columns in region, then need to expand existing dataset
if main_glac_hyps_region.shape[1] > main_glac_hyps.shape[1]:
all_col = list(main_glac_hyps.columns.values)
reg_col = list(main_glac_hyps_region.columns.values)
new_cols = [item for item in reg_col if item not in all_col]
for new_col in new_cols:
main_glac_hyps[new_col] = 0
main_glac_icethickness[new_col] = 0
main_glac_width[new_col] = 0
elif main_glac_hyps_region.shape[1] < main_glac_hyps.shape[1]:
all_col = list(main_glac_hyps.columns.values)
reg_col = list(main_glac_hyps_region.columns.values)
new_cols = [item for item in all_col if item not in reg_col]
for new_col in new_cols:
main_glac_hyps_region[new_col] = 0
main_glac_icethickness_region[new_col] = 0
main_glac_width_region[new_col] = 0
main_glac_hyps = main_glac_hyps.append(main_glac_hyps_region)
main_glac_icethickness = main_glac_icethickness.append(main_glac_icethickness_region)
main_glac_width = main_glac_width.append(main_glac_width_region)
gcm_temp = np.vstack([gcm_temp, gcm_temp_region])
gcm_tempstd = np.vstack([gcm_tempstd, gcm_tempstd_region])
gcm_prec = np.vstack([gcm_prec, gcm_prec_region])
gcm_elev = np.concatenate([gcm_elev, gcm_elev_region])
gcm_lr = np.vstack([gcm_lr, gcm_lr_region])
# reset index
main_glac_rgi.reset_index(inplace=True, drop=True)
if option_loadcal_data == 1:
cal_data.reset_index(inplace=True, drop=True)
if option_loadhyps_climate == 1:
main_glac_hyps.reset_index(inplace=True, drop=True)
main_glac_icethickness.reset_index(inplace=True, drop=True)
main_glac_width.reset_index(inplace=True, drop=True)
if option_loadhyps_climate == 0 and option_loadcal_data == 0:
return main_glac_rgi
if option_loadhyps_climate == 0 and option_loadcal_data == 1:
return main_glac_rgi, cal_data
else:
return (main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width,
gcm_temp, gcm_tempstd, gcm_prec, gcm_elev, gcm_lr,
cal_data, dates_table)
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'rgi_region':
groups = main_glac_rgi_all.O1Region.unique().tolist()
group_cn = 'O1Region'
elif grouping == 'watershed':
groups = main_glac_rgi_all.watershed.unique().tolist()
group_cn = 'watershed'
elif grouping == 'kaab':
groups = main_glac_rgi_all.kaab.unique().tolist()
group_cn = 'kaab'
groups = [x for x in groups if str(x) != 'nan']
elif grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
elif grouping == 'mascon':
groups = main_glac_rgi_all.mascon_idx.unique().tolist()
groups = [int(x) for x in groups]
group_cn = 'mascon_idx'
else:
groups = ['all']
group_cn = 'all_group'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def partition_groups(grouping, vn, main_glac_rgi_all, regional_calc='mean'):
"""Partition variable by each group
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
regional_calc : str
calculation used to compute region value (mean, sum, area_weighted_mean)
Returns
-------
groups : list
list of group names
ds_group : list of lists
list of [group, regional value] pairs for the given variable
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
ds_group = [[] for group in groups]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = main_glac_rgi_all[vn].values[main_glac_rgi.index.values.tolist()]
if 'area_weighted' in regional_calc:
vn_glac_area = main_glac_rgi_all['Area'].values[main_glac_rgi.index.values.tolist()]
# Regional calc
if regional_calc == 'mean':
vn_reg = vn_glac.mean(axis=0)
elif regional_calc == 'sum':
vn_reg = vn_glac.sum(axis=0)
elif regional_calc == 'area_weighted_mean':
vn_reg = (vn_glac * vn_glac_area).sum() / vn_glac_area.sum()
# Record data for each group
ds_group[ngroup] = [group, vn_reg]
return groups, ds_group
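# Hedged toy example (column names mirror those used elsewhere in this script; the
# values are made up): area-weighted regional mass balance for two RGI regions.
def _example_partition_groups():
    toy = pd.DataFrame({'O1Region': [13, 13, 14],
                        'Area': [1.0, 3.0, 2.0],
                        'massbal': [-0.2, -0.6, -0.4]})
    groups, ds_group = partition_groups('rgi_region', 'massbal', toy,
                                        regional_calc='area_weighted_mean')
    # groups -> [13, 14]; ds_group -> [[13, -0.5], [14, -0.4]] (up to floating point)
    return groups, ds_group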
def effective_n(ds, vn, iters, burn, chain=0):
"""
Compute the effective sample size of a trace.
Takes the trace and computes the effective sample size
according to its detrended autocorrelation.
Parameters
----------
ds : xarray.Dataset
dataset containing mcmc traces
vn : str
Parameter variable name
iters : int
number of mcmc iterations to test
burn : int
number of initial iterations to throw away
Returns
-------
effective_n : int
effective sample size
"""
# Effective sample size
x = ds['mp_value'].sel(chain=chain, mp=vn).values[burn:iters]
# detrend trace using mean to be consistent with statistics
# definition of autocorrelation
x = (x - x.mean())
# compute autocorrelation (note: only need second half since
# they are symmetric)
rho = np.correlate(x, x, mode='full')
rho = rho[len(rho)//2:]
# normalize the autocorrelation values
# note: rho[0] is the variance * n_samples, so this is consistent
# with the statistics definition of autocorrelation on wikipedia
# (dividing by n_samples gives you the expected value).
rho_norm = rho / rho[0]
# Iterate until sum of consecutive estimates of autocorrelation is
# negative to avoid issues with the sum being -0.5, which returns an
# effective_n of infinity
negative_autocorr = False
t = 1
n = len(x)
while not negative_autocorr and (t < n):
if not t % 2:
negative_autocorr = sum(rho_norm[t-1:t+1]) < 0
t += 1
return int(n / (1 + 2*rho_norm[1:t].sum()))
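# Hedged sketch (the 'iter' dimension name and coordinate layout below are assumptions
# chosen to mirror how effective_n indexes the dataset): for white noise the effective
# sample size should be close to the chain length.
def _example_effective_n():
    n = 2000
    vals = np.random.normal(size=(1, n, 1))
    ds = xr.Dataset({'mp_value': (('chain', 'iter', 'mp'), vals)},
                    coords={'chain': [0], 'iter': np.arange(n), 'mp': ['tempchange']})
    return effective_n(ds, 'tempchange', iters=n, burn=0)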
def gelman_rubin(ds, vn, iters=1000, burn=0, debug=False):
"""
Calculate Gelman-Rubin statistic.
Parameters
----------
ds : xarray.Dataset
Dataset containing MCMC iterations for a single glacier with 3 chains
vn : str
Parameter variable name
iters : int
number of MCMC iterations to test for the gelman-rubin statistic
burn : int
number of MCMC iterations to ignore at start of chain before performing test
Returns
-------
gelman_rubin_stat : float
gelman_rubin statistic (R_hat)
"""
if debug:
if len(ds.chain) != 3:
raise ValueError('Given dataset has an incorrect number of chains')
if iters > ds['mp_value'].sel(chain=0, mp=vn).shape[0]:
raise ValueError('iters value too high')
if (burn >= iters):
raise ValueError('Given iters and burn in are incompatible')
# unpack iterations from dataset
for n_chain in ds.chain.values:
if n_chain == 0:
chain = ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters]
chain = np.reshape(chain, (1,len(chain)))
else:
chain2add = ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters]
chain2add = np.reshape(chain2add, (1,chain.shape[1]))
chain = np.append(chain, chain2add, axis=0)
#calculate statistics with pymc in-built function
return pymc.gelman_rubin(chain)
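# Hedged sketch (same assumed dataset layout as the effective_n example above): three
# independent white-noise chains should give a Gelman-Rubin statistic close to 1.
def _example_gelman_rubin():
    n = 2000
    vals = np.random.normal(size=(3, n, 1))
    ds = xr.Dataset({'mp_value': (('chain', 'iter', 'mp'), vals)},
                    coords={'chain': [0, 1, 2], 'iter': np.arange(n), 'mp': ['tempchange']})
    return gelman_rubin(ds, 'tempchange', iters=n, burn=0)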
def mc_error(ds, vn, iters=None, burn=0, chain=None, method='overlapping'):
""" Calculates Monte Carlo standard error using the batch mean method for each chain
For multiple chains, it outputs a list of the values
Parameters
----------
ds : xarray.Dataset
Dataset containing MCMC iterations for a single glacier with 3 chains
vn : str
Parameter variable name
iters : int
Number of iterations to use
Returns
-------
chains_mcse : list of floats
list of the Monte Carlo standard error for each chain
chains_ci : list of floats
list of the +/- confidence interval value for each chain
"""
if iters is None:
iters = len(ds.mp_value)
trace = [ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters] for n_chain in ds.chain.values]
mcse_output = [mcse_batchmeans(i, method=method) for i in trace]
chains_mcse = [i[0] for i in mcse_output]
chains_ci = [i[1] for i in mcse_output]
return chains_mcse, chains_ci
def mcse_batchmeans(trace, t_quantile=0.95, method='overlapping'):
""" Calculates Monte Carlo standard error for a given trace using batch means method from Flegal and Jones (2010)
Splitting uses all values in trace, so batches can have different lengths (maximum difference is 1)
Parameters
----------
trace: np.ndarray
Array representing MCMC chain
t_quantile : float
student t-test quantile (default = 0.95)
method : str
method used to compute batch means (default = 'overlapping', other option is 'nonoverlapping')
Returns
-------
trace_mcse : float
Monte Carlo standard error for a given trace
trace_ci : float
+/- value for confidence interval
"""
# Number of batches (n**0.5 based on Flegal and Jones (2010))
batches = int(len(trace)**0.5)
batch_size = int(len(trace)/batches)
# Split into batches
if method == 'overlapping':
trace_batches = [trace[i:i+batch_size] for i in range(0,int(len(trace)-batches+1))]
elif method == 'nonoverlapping':
trace_batches = split_array(trace,batches)
# Sample batch means
trace_batches_means = [np.mean(i) for i in trace_batches]
# Batch mean estimator
trace_batches_mean = np.mean(trace_batches_means)
# Sample variance
if method == 'overlapping':
trace_samplevariance = (
(len(trace)/batches) / len(trace) * np.sum([(i - trace_batches_mean)**2 for i in trace_batches_means]))
elif method == 'nonoverlapping':
trace_samplevariance = (
(len(trace)/batches) / (batches-1) * np.sum([(i - trace_batches_mean)**2 for i in trace_batches_means]))
# Monte Carlo standard error
trace_mcse = trace_samplevariance**0.5 / len(trace)**0.5
# Confidence interval value (actual confidence interval is batch_mean_estimator +/- trace_ci)
trace_ci = stats.t.ppf(t_quantile, (len(trace)**0.5)-1) * trace_mcse
return trace_mcse, trace_ci
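# Hedged sanity check (not in the original script): for independent draws the batch-means
# MCSE should approach the naive standard error sigma / sqrt(n), e.g.
# trace = np.random.normal(loc=0, scale=1, size=10000)
# mcse, ci = mcse_batchmeans(trace, method='overlapping')
# # mcse should be close to 1 / sqrt(10000) = 0.01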
def split_array(arr, n=1):
"""
Split an array into n batches of sequential values (used for the batch means method).
Parameters
----------
arr : np.array
array that you want to split into separate batches
n : int
Number of batches to split glaciers into.
Returns
-------
arr_batches : list of np.array
list of n arrays, each containing sequential values from the input array
"""
# If n is larger than the array, use one value per batch
if n > len(arr):
n = len(arr)
# number of values per list rounded down/up
n_perlist_low = int(len(arr)/n)
n_perlist_high = int(np.ceil(len(arr)/n))
# number of batches that get the larger size (uses all values of the array, but batches are not necessarily equal length)
n_lists_high = len(arr)%n
# loop through and select values
count = 0
arr_batches = []
for x in np.arange(n):
count += 1
if count <= n_lists_high:
arr_subset = arr[0:n_perlist_high]
arr_batches.append(arr_subset)
arr = arr[n_perlist_high:]
else:
arr_subset = arr[0:n_perlist_low]
arr_batches.append(arr_subset)
arr = arr[n_perlist_low:]
return arr_batches
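# Hedged example (not in the original script):
# split_array(np.arange(10), n=3) -> [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
# (10 % 3 = 1 batch receives ceil(10/3) = 4 values; the remaining batches receive floor(10/3) = 3.)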
def pickle_data(fn, data):
"""Pickle data
Parameters
----------
fn : str
filename including filepath
data : list, etc.
data to be pickled
Returns
-------
.pkl file
saves .pkl file of the data
"""
with open(fn, 'wb') as f:
pickle.dump(data, f)
def plot_hist(df, cn, bins, xlabel=None, ylabel=None, fig_fn='hist.png', fig_fp=input.output_filepath):
"""
Plot histogram for any bin size
"""
data = df[cn].values
hist, bin_edges = np.histogram(data,bins) # make the histogram
fig,ax = plt.subplots()
# Plot the histogram heights against integers on the x axis
ax.bar(range(len(hist)),hist,width=1, edgecolor='k')
# Set the ticks to the middle of the bars
ax.set_xticks([0.5+i for i,j in enumerate(hist)])
# Set the xticklabels to a string that tells us what the bin edges were
ax.set_xticklabels(['{} - {}'.format(bins[i],bins[i+1]) for i,j in enumerate(hist)], rotation=45, ha='right')
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel(ylabel, fontsize=16)
# Save figure
fig.set_size_inches(6,4)
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
def plot_mb_vs_parameters(tempchange_iters, precfactor_iters, ddfsnow_iters, modelparameters, glacier_rgi_table,
glacier_area_t0, icethickness_t0, width_t0, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd,
glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
observed_massbal, observed_error, tempchange_boundhigh, tempchange_boundlow,
tempchange_opt_init=None, mb_max_acc=None, mb_max_loss=None, option_areaconstant=0,
option_plotsteps=1, fig_fp=input.output_filepath):
"""
Plot the mass balance [mwea] versus all model parameters to see how the parameters affect the mass balance
"""
#%%
mb_vs_parameters = pd.DataFrame(np.zeros((len(ddfsnow_iters) * len(precfactor_iters) * len(tempchange_iters), 4)),
columns=['precfactor', 'tempbias', 'ddfsnow', 'massbal'])
count=0
for n, precfactor in enumerate(precfactor_iters):
modelparameters[2] = precfactor
# run mass balance calculation
# if modelparameters[2] == 1:
# option_areaconstant = 0
# else:
# option_areaconstant = 1
option_areaconstant = 0
print('PF:', precfactor, 'option_areaconstant:', option_areaconstant)
for n, tempchange in enumerate(tempchange_iters):
modelparameters[7] = tempchange
for c, ddfsnow in enumerate(ddfsnow_iters):
modelparameters[4] = ddfsnow
modelparameters[5] = modelparameters[4] / input.ddfsnow_iceratio
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters[0:8], glacier_rgi_table, glacier_area_t0,
icethickness_t0, width_t0, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=option_areaconstant))
# Compute glacier volume change for every time step and use this to compute mass balance
# this will work for any indexing
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# Mean annual mass balance [mwea]
mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
(glac_wide_masschange.shape[0] / 12))
mb_vs_parameters.loc[count,:] = np.array([precfactor, tempchange, ddfsnow, mb_mwea])
count += 1
# print(modelparameters[2], modelparameters[7], modelparameters[4], np.round(mb_mwea,3))
# Subset data for each precfactor
linestyles = ['-', '--', ':', '-.']
linecolors = ['b', 'k', 'r']
prec_linedict = {precfactor : linestyles[n] for n, precfactor in enumerate(precfactor_iters)}
ddfsnow_colordict = {ddfsnow : linecolors[n] for n, ddfsnow in enumerate(ddfsnow_iters)}
#%%
# Plot the mass balance versus model parameters
fig, ax = plt.subplots(figsize=(6,4))
for precfactor in precfactor_iters:
modelparameters[2] = precfactor
mb_vs_parameters_subset = mb_vs_parameters.loc[mb_vs_parameters.loc[:,'precfactor'] == precfactor]
for ddfsnow in ddfsnow_iters:
mb_vs_parameters_plot = mb_vs_parameters_subset.loc[mb_vs_parameters_subset.loc[:,'ddfsnow'] == ddfsnow]
ax.plot(mb_vs_parameters_plot.loc[:,'tempbias'], mb_vs_parameters_plot.loc[:,'massbal'],
linestyle=prec_linedict[precfactor], color=ddfsnow_colordict[ddfsnow])
# Add horizontal line of mass balance observations
ax.axhline(observed_massbal, color='gray', linewidth=2, zorder=2)
observed_mb_min = observed_massbal - 3*observed_error
observed_mb_max = observed_massbal + 3*observed_error
fillcolor = 'lightgrey'
ax.fill_between([np.min(tempchange_iters), np.max(tempchange_iters)], observed_mb_min, observed_mb_max,
facecolor=fillcolor, label=None, zorder=1)
if option_plotsteps == 1:
# marker='*'
# marker_size = 20
marker='D'
marker_size = 10
markeredge_color = 'black'
marker_color = 'black'
txt_xadj = -0.1
txt_yadj = -0.06
xytxt_list = [(tempchange_boundhigh, mb_max_loss, '1'),
(tempchange_boundlow, mb_max_loss + 0.9*(mb_max_acc - mb_max_loss), '3'),
(tempchange_opt_init, observed_massbal, '4'),
(tempchange_opt_init + 3*tempchange_sigma, observed_mb_min, '6'),
(tempchange_opt_init - 3*tempchange_sigma, observed_mb_max, '6'),
(tempchange_opt_init - tempchange_sigma, observed_mb_max, '7'),
(tempchange_opt_init + tempchange_sigma, observed_mb_min, '7'),
(tempchange_mu, observed_massbal, '9')]
for xytxt in xytxt_list:
x,y,txt = xytxt[0], xytxt[1], xytxt[2]
ax.plot([x], [y], marker=marker, markersize=marker_size,
markeredgecolor=markeredge_color, color=marker_color, zorder=3)
ax.text(x+txt_xadj, y+txt_yadj, txt, zorder=4, color='white', fontsize=10)
ax.set_xlim(np.min(tempchange_iters), np.max(tempchange_iters))
if observed_massbal - 3*observed_error < mb_max_loss:
ylim_lower = observed_massbal - 3*observed_error
else:
ylim_lower = np.floor(mb_max_loss)
ax.set_ylim(int(ylim_lower),np.ceil(mb_vs_parameters['massbal'].max()))
print('\nMANUALLY SET YLIM\n')
ax.set_ylim(-2,2)
# Labels
# ax.set_title('Mass balance versus Parameters ' + glacier_str)
ax.set_xlabel('Temperature Bias ($\mathregular{^{\circ}}$C)', fontsize=12)
ax.set_ylabel('Mass Balance (m w.e. $\mathregular{a^{-1}}$)', fontsize=12)
# Add legend
x_min = mb_vs_parameters.loc[:,'tempbias'].min()
y_min = mb_vs_parameters.loc[:,'massbal'].min()
leg_lines = []
leg_names = []
for precfactor in precfactor_iters:
line = Line2D([x_min,y_min],[x_min,y_min], linestyle=prec_linedict[precfactor], color='gray')
leg_lines.append(line)
leg_names.append(str(precfactor))
leg_pf = ax.legend(leg_lines, leg_names, loc='upper right', title='$\mathit{k_{p}}$', frameon=False,
labelspacing=0.25, bbox_to_anchor=(0.99, 0.99))
leg_lines = []
leg_names = []
for ddfsnow in ddfsnow_iters:
line = Line2D([x_min,y_min],[x_min,y_min], linestyle='-', color=ddfsnow_colordict[ddfsnow])
leg_lines.append(line)
leg_names.append(str(np.round(ddfsnow*10**3,1)))
leg_ddf = ax.legend(leg_lines, leg_names, loc='upper left', title='$\mathit{f_{snow}}$', frameon=False,
labelspacing=0.25, bbox_to_anchor=(0.63, 0.99))
ax.add_artist(leg_pf)
# for precfactor in reversed(precfactor_iters):
# line = Line2D([x_min,y_min],[x_min,y_min], linestyle=prec_linedict[precfactor], color='gray')
# leg_lines.append(line)
# leg_names.append('$\mathregular{k_{p}}$ ' + str(precfactor))
# for ddfsnow in ddfsnow_iters:
# line = Line2D([x_min,y_min],[x_min,y_min], linestyle='-', color=ddfsnow_colordict[ddfsnow])
# leg_lines.append(line)
# leg_names.append('$\mathregular{f_{snow}}$ ' + str(np.round(ddfsnow*10**3,1)))
fig.savefig(fig_fp + glacier_str + '_mb_vs_parameters_areachg.eps',
bbox_inches='tight', dpi=300)
#%%
# ===== PLOT OPTIONS ==================================================================================================
def grid_values(vn, grouping, modelparams_all, midpt_value=np.nan):
""" XYZ of grid values """
# Group data
if vn in ['precfactor', 'tempchange', 'ddfsnow']:
groups, ds_vn_deg = partition_groups(grouping, vn, modelparams_all, regional_calc='area_weighted_mean')
groups, ds_group_area = partition_groups(grouping, 'Area', modelparams_all, regional_calc='sum')
elif vn == 'dif_masschange':
# Group calculations
groups, ds_group_cal = partition_groups(grouping, 'mb_cal_Gta', modelparams_all, regional_calc='sum')
groups, ds_group_era = partition_groups(grouping, 'mb_era_Gta', modelparams_all, regional_calc='sum')
groups, ds_group_area = partition_groups(grouping, 'Area', modelparams_all, regional_calc='sum')
# Group difference [Gt/yr]
dif_cal_era_Gta = (np.array([x[1] for x in ds_group_cal]) - np.array([x[1] for x in ds_group_era])).tolist()
# Group difference [mwea]
area = [x[1] for x in ds_group_area]
ds_group_dif_cal_era_mwea = [[x[0], dif_cal_era_Gta[n] / area[n] * 1000] for n, x in enumerate(ds_group_cal)]
ds_vn_deg = ds_group_dif_cal_era_mwea
z = [ds_vn_deg[ds_idx][1] for ds_idx in range(len(ds_vn_deg))]
# use the groups returned by partition_groups (lon/lat tuples for the degree grouping),
# which are in the same order as the values in ds_vn_deg
x = np.array([g[0] for g in groups])
y = np.array([g[1] for g in groups])
lons = np.arange(x.min(), x.max() + 2 * degree_size, degree_size)
lats = np.arange(y.min(), y.max() + 2 * degree_size, degree_size)
x_adj = np.arange(x.min(), x.max() + 1 * degree_size, degree_size) - x.min()
y_adj = np.arange(y.min(), y.max() + 1 * degree_size, degree_size) - y.min()
z_array = np.zeros((len(y_adj), len(x_adj)))
z_array[z_array==0] = np.nan
for i in range(len(z)):
row_idx = int((y[i] - y.min()) / degree_size)
col_idx = int((x[i] - x.min()) / degree_size)
z_array[row_idx, col_idx] = z[i]
return lons, lats, z_array
def plot_spatialmap_mbdif(vns, grouping, modelparams_all, xlabel, ylabel, figure_fp, fig_fn_prefix='',
option_contour_lines=0, option_rgi_outlines=0, option_group_regions=0):
"""Plot spatial map of model parameters"""
#%%
fig = plt.figure()
# Custom subplots
gs = mpl.gridspec.GridSpec(20, 1)
ax1 = plt.subplot(gs[0:11,0], projection=cartopy.crs.PlateCarree())
ax2 = plt.subplot(gs[12:20,0])
# # Third subplot
# gs = mpl.gridspec.GridSpec(20, 20)
# ax1 = plt.subplot(gs[0:11,0:20], projection=cartopy.crs.PlateCarree())
# ax2 = plt.subplot(gs[12:20,0:7])
# ax2 = plt.subplot(gs[12:20,13:20])
cmap = 'RdYlBu_r'
# cmap = plt.cm.get_cmap(cmap, 5)
norm = plt.Normalize(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1])
vn = 'dif_masschange'
lons, lats, z_array = grid_values(vn, grouping, modelparams_all)
ax1.pcolormesh(lons, lats, z_array, cmap=cmap, norm=norm, zorder=2, alpha=0.8)
# Add country borders for reference
# ax1.add_feature(cartopy.feature.BORDERS, facecolor='none', edgecolor='lightgrey', zorder=10)
# ax1.add_feature(cartopy.feature.COASTLINE, facecolor='none', edgecolor='lightgrey', zorder=10)
# Set the extent
ax1.set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
ax1.set_xticks(np.arange(east,west+1,xtick), crs=cartopy.crs.PlateCarree())
ax1.set_yticks(np.arange(south,north+1,ytick), crs=cartopy.crs.PlateCarree())
ax1.set_xlabel(xlabel, size=labelsize, labelpad=0)
ax1.set_ylabel(ylabel, size=labelsize)
# Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# cbar = plt.colorbar(sm, ax=ax1, fraction=0.04, pad=0.01)
# cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.1)))
# fig.text(1.01, 0.6, '$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
# rotation='vertical', size=12)
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.92, 0.5, 0.02, 0.35])
cbar = fig.colorbar(sm, cax=cbar_ax)
cbar.set_ticks(list(np.arange(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1] + 0.01, 0.1)))
fig.text(1.04, 0.67, '$\mathit{B_{mod}} - \mathit{B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
rotation='vertical', size=12)
# Add contour lines and/or rgi outlines
if option_contour_lines == 1:
srtm_contour_shp = cartopy.io.shapereader.Reader(srtm_contour_fn)
srtm_contour_feature = cartopy.feature.ShapelyFeature(srtm_contour_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=0.05)
ax1.add_feature(srtm_contour_feature, zorder=9)
if option_rgi_outlines == 1:
rgi_shp = cartopy.io.shapereader.Reader(rgi_glac_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='black', facecolor='none', linewidth=0.1)
ax1.add_feature(rgi_feature, zorder=9)
if option_group_regions == 1:
rgi_shp = cartopy.io.shapereader.Reader(bolch_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=1)
ax1.add_feature(rgi_feature, zorder=9)
ax1.text(101., 28.0, 'Hengduan\nShan', zorder=10, size=8, va='center', ha='center')
ax1.text(99.0, 26.5, 'Nyainqentanglha', zorder=10, size=8, va='center', ha='center')
ax1.plot([98,96], [27,29.3], color='k', linewidth=0.25, zorder=10)
ax1.text(93.0, 27.5, 'Eastern Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(80.0, 27.3, 'Central Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(72.0, 31.7, 'Western Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(70.5, 33.7, 'Eastern\nHindu Kush', zorder=10, size=8, va='center', ha='center')
ax1.text(79.0, 39.7, 'Karakoram', zorder=10, size=8, va='center', ha='center')
ax1.plot([76,78], [36,39], color='k', linewidth=0.25, zorder=10)
ax1.text(80.7, 38.0, 'Western\nKunlun Shan', zorder=10, size=8, va='center', ha='center')
ax1.text(86.0, 33.7, 'Tibetan Interior\nMountains', zorder=10, size=8, va='center', ha='center')
ax1.text(73.0, 29.0, 'Gandise Mountains', zorder=10, size=8, va='center', ha='center')
ax1.plot([77.5,81.5], [29,31.4], color='k', linewidth=0.25, zorder=10)
# Scatter plot
# # Scatterplot: Model vs. Observed Mass balance colored by Area
# cmap = 'RdYlBu_r'
# norm = colors.LogNorm(vmin=0.1, vmax=10)
# a = ax2.scatter(modelparams_all['mb_mwea'], modelparams_all['mb_mean'], c=modelparams_all['Area'],
# cmap=cmap, norm=norm, s=20, linewidth=0.5)
# a.set_facecolor('none')
# ax2.plot([-2.5,2],[-2.5,2], color='k', linewidth=0.5)
# ax2.set_xlim([-2.5,1.75])
# ax2.set_ylim([-2.5,1.75])
# ax2.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
# ax2.set_xlabel('$\mathregular{B_{mod}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
## # Add colorbar
## sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
## sm._A = []
## cbar = plt.colorbar(sm, ax=ax2, fraction=0.04, pad=0.01)
## fig.text(1.01, 0.5, 'Area ($\mathregular{km^{2}}$)', va='center', rotation='vertical', size=12)
#
# # Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# cbar_ax = fig.add_axes([0.92, 0.13, 0.02, 0.29])
# cbar = fig.colorbar(sm, cax=cbar_ax)
## cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
# fig.text(1.04, 0.28, 'Area ($\mathregular{km^{2}}$)', va='center', rotation='vertical', size=12)
# Z-score
ax2.axhline(y=0, xmin=0, xmax=200, color='black', linewidth=0.5, zorder=1)
# ax2.scatter(modelparams_all['Area'], modelparams_all['mb_mwea'], c=modelparams_all['dif_cal_era_mean'],
# cmap=cmap, norm=norm, s=5)
# ax2.set_xlim([0,200])
# ax2.set_ylim([-2.9,1.25])
# ax2.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
# ax2.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
#
# # Inset axis over main axis
# ax_inset = plt.axes([.37, 0.16, .51, .14])
# ax_inset.axhline(y=0, xmin=0, xmax=5, color='black', linewidth=0.5)
# ax_inset.scatter(modelparams_all['Area'], modelparams_all['mb_mwea'], c=modelparams_all['dif_cal_era_mean'],
# cmap=cmap, norm=norm, s=3)
# ax_inset.set_xlim([0,5])
#
# # Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# fig.subplots_adjust(right=0.9)
# cbar_ax = fig.add_axes([0.92, 0.16, 0.03, 0.67])
# cbar = fig.colorbar(sm, cax=cbar_ax)
# cbar.set_ticks(list(np.arange(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1] + 0.01, 0.1)))
# fig.text(1.04, 0.5, '$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
# rotation='vertical', size=12)
# Scatterplot
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
# norm = plt.Normalize(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1])
norm = MidpointNormalize(midpoint=0, vmin=colorbar_dict['massbal'][0], vmax=colorbar_dict['massbal'][1])
a = ax2.scatter(modelparams_all['Area'], modelparams_all['zscore'], c=modelparams_all['mb_mwea'],
cmap=cmap, norm=norm, s=20, linewidth=0.5, zorder=2)
a.set_facecolor('none')
ax2.set_xlim([0,200])
ax2.set_ylim([-3.8,2.5])
# ax2.set_ylabel('z-score ($\\frac{B_{mod} - B_{obs}}{B_{std}}$)', size=12)
ax2.set_ylabel('z-score (-)', size=12)
ax2.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.37, 0.16, .51, .12])
b = ax_inset.scatter(modelparams_all['Area'], modelparams_all['zscore'], c=modelparams_all['mb_mwea'],
cmap=cmap, norm=norm, s=10,linewidth=0.5)
b.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar_ax = fig.add_axes([0.92, 0.13, 0.02, 0.29])
cbar = fig.colorbar(sm, cax=cbar_ax)
cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
fig.text(1.04, 0.28, '$\mathit{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
rotation='vertical', size=12)
# cbar = plt.colorbar(sm, ax=ax2, fraction=0.04, pad=0.01)
# cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
# fig.text(1.01, 0.3, '$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
# rotation='vertical', size=12)
# Add subplot labels
fig.text(0.15, 0.83, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.15, 0.40, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
# Save figure
fig.set_size_inches(6,7)
if degree_size < 1:
degsize_name = 'pt' + str(int(degree_size * 100))
else:
degsize_name = str(degree_size)
fig_fn = fig_fn_prefix + 'MB_dif_map_scatter_' + degsize_name + 'deg.png'
fig.savefig(figure_fp + fig_fn, bbox_inches='tight', dpi=300)
#%%
def plot_spatialmap_parameters(vns, grouping, modelparams_all, xlabel, ylabel, midpt_dict, cmap_dict, title_adj,
figure_fp, fig_fn_prefix='', option_contour_lines=0, option_rgi_outlines=0,
option_group_regions=0):
"""Plot spatial map of model parameters"""
fig, ax = plt.subplots(len(vns), 1, subplot_kw={'projection':cartopy.crs.PlateCarree()},
gridspec_kw = {'wspace':0.1, 'hspace':0.03})
# plt.subplots returns a single Axes object (not an array) when len(vns) == 1,
# so wrap it in a list to keep the ax[nvar] indexing used throughout this function valid
if len(vns) == 1:
ax = [ax]
for nvar, vn in enumerate(vns):
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
cmap = cmap_dict[vn]
norm = MidpointNormalize(midpoint=midpt_dict[vn], vmin=colorbar_dict[vn][0], vmax=colorbar_dict[vn][1])
lons, lats, z_array = grid_values(vn, grouping, modelparams_all)
ax[nvar].pcolormesh(lons, lats, z_array, cmap=cmap, norm=norm, zorder=2, alpha=0.8)
# Set the extent
ax[nvar].set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
ax[nvar].set_xticks(np.arange(east,west+1,xtick), crs=cartopy.crs.PlateCarree())
ax[nvar].set_yticks(np.arange(south,north+1,ytick), crs=cartopy.crs.PlateCarree())
if nvar + 1 == len(vns):
ax[nvar].set_xlabel(xlabel, size=labelsize, labelpad=0)
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax[nvar], fraction=0.03, pad=0.01)
# Set tick marks manually
if vn == 'dif_masschange':
cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.05)))
elif vn == 'tempchange':
cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.5))[1:-1])
ax[nvar].text(lons.max()+title_adj[vn], lats.mean(), vn_title_wunits_dict[vn], va='center', ha='center',
rotation='vertical', size=labelsize)
if option_group_regions == 1:
rgi_shp = cartopy.io.shapereader.Reader(bolch_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=1)
ax[nvar].add_feature(rgi_feature, zorder=9)
ax[nvar].text(101., 28.0, 'Hengduan\nShan', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(99.0, 26.5, 'Nyainqentanglha', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([98,96], [27,29.3], color='k', linewidth=0.25, zorder=10)
ax[nvar].text(93.0, 27.5, 'Eastern Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(80.0, 27.3, 'Central Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(72.0, 31.7, 'Western Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(70.5, 33.7, 'Eastern\nHindu Kush', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(79.0, 39.7, 'Karakoram', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([76,78], [36,39], color='k', linewidth=0.25, zorder=10)
ax[nvar].text(80.7, 38.0, 'Western\nKunlun Shan', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(86.0, 33.7, 'Tibetan Interior\nMountains', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(73.0, 29.0, 'Gandise Mountains', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([77.5,81.5], [29,31.4], color='k', linewidth=0.25, zorder=10)
else:
# Add country borders for reference
ax[nvar].add_feature(cartopy.feature.BORDERS, facecolor='none', edgecolor='lightgrey', zorder=10)
ax[nvar].add_feature(cartopy.feature.COASTLINE, facecolor='none', edgecolor='lightgrey', zorder=10)
# Add contour lines and/or rgi outlines
if option_contour_lines == 1:
srtm_contour_shp = cartopy.io.shapereader.Reader(srtm_contour_fn)
srtm_contour_feature = cartopy.feature.ShapelyFeature(srtm_contour_shp.geometries(),
cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none',
linewidth=0.05)
ax[nvar].add_feature(srtm_contour_feature, zorder=9)
if option_rgi_outlines == 1:
rgi_shp = cartopy.io.shapereader.Reader(rgi_glac_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='black', facecolor='none', linewidth=0.1)
ax[nvar].add_feature(rgi_feature, zorder=9)
# Add subplot labels
if len(vns) == 3:
fig.text(0.21, 0.86, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.605, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.35, 'C', zorder=4, color='black', fontsize=12, fontweight='bold')
elif len(vns) == 2:
fig.text(0.21, 0.85, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.46, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
if len(vns) > 1:
fig.text(0.1, 0.5, ylabel, va='center', rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,3*len(vns))
if degree_size < 1:
degsize_name = 'pt' + str(int(degree_size * 100))
else:
degsize_name = str(degree_size)
fig_fn = fig_fn_prefix + 'mp_maps_' + degsize_name + 'deg_' + str(len(vns)) + 'params.png'
fig.savefig(figure_fp + fig_fn, bbox_inches='tight', dpi=300)
#%%
def observation_vs_calibration(regions, netcdf_fp, chainlength=chainlength, burn=0, chain_no=0, netcdf_fn=None):
"""
Compare mass balance observations with model calibration
Parameters
----------
regions : list of strings
list of regions
chainlength : int
chain length
burn : int
burn-in number
Returns
-------
.png files
saves histogram of differences between observations and calibration
.csv file
saves .csv file of comparison
"""
#%%
#for batman in [0]:
# netcdf_fp = mcmc_output_netcdf_fp_all
# chain_no = 0
csv_fp = netcdf_fp + 'csv/'
fig_fp = netcdf_fp + 'figures/'
# Load mean of all model parameters
if not os.path.isfile(csv_fp + netcdf_fn):
filelist = []
for region in regions:
filelist.extend(glob.glob(netcdf_fp + str(region) + '*.nc'))
glac_no = []
reg_no = []
for netcdf in filelist:
glac_str = netcdf.split('/')[-1].split('.nc')[0]
glac_no.append(glac_str)
reg_no.append(glac_str.split('.')[0])
glac_no = sorted(glac_no)
(main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width,
gcm_temp, gcm_tempstd, gcm_prec, gcm_elev, gcm_lr, cal_data, dates_table) = load_glacierdata_byglacno(glac_no)
posterior_cns = ['glacno', 'mb_mean', 'mb_std', 'pf_mean', 'pf_std', 'tc_mean', 'tc_std', 'ddfsnow_mean',
'ddfsnow_std']
posterior_all = pd.DataFrame(np.zeros((main_glac_rgi.shape[0], len(posterior_cns))), columns=posterior_cns)
print('burn:', burn, 'chain length:', chainlength)
for n, glac_str_wRGI in enumerate(main_glac_rgi['RGIId'].values):
if n%500 == 0:
print(n, glac_str_wRGI)
# Glacier string
glacier_str = glac_str_wRGI.split('-')[1]
# MCMC Analysis
ds = xr.open_dataset(netcdf_fp + glacier_str + '.nc')
df = | pd.DataFrame(ds['mp_value'].values[burn:chainlength,:,0], columns=ds.mp.values) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created by <NAME> July 2021
Generates NIPS4Bplus test and train datasets to run on the modified SincNet
code
Generates train and test sets for three selections of classes: "All Classes",
"Bird Classes" and "Bird Species"
NIPS4Bplus annotations:
https://doi.org/10.6084/m9.figshare.6798548
NIPS4B species list:
http://sabiod.univ-tln.fr/nips4b/media/birds/NIPS4B_BIRD_CHALLENGE_TRAIN_LABELS.tar
Choose: nips4b_birdchallenge_espece_list.csv
Instructions
https://github.com/fbravosanchez/NIPS4Bplus#readme
"""
import os
import glob
import pandas as pd
from sklearn.model_selection import train_test_split
import sys
def export_csv(file, name):
file.to_csv(os.path.join(output_path, '') + name +'.csv',index=False, header=True, encoding='utf-8')
#path to NIPS4Bplus csv annotation files
annotations_path=sys.argv[1]
#path to NIPS4B species list
sps_list_file=sys.argv[2]
#output path for csv list files
output_path=sys.argv[3]
if not os.path.exists(output_path):
os.makedirs(output_path)
#collect csv label file list
lbl_files = pd.DataFrame(glob.glob(os.path.join(annotations_path, '') + '*.csv'))
lbl_files.columns = ['csv']
lbl_files['wav'] = 'nips4b_birds_trainfile' + lbl_files['csv'].str[-7:-4]
#read species list
sps_list = pd.read_csv(sps_list_file)
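# Note (added for clarity): the species list CSV is expected to provide at least the
# columns referenced below: 'class name', 'type' and 'Scientific_name'. Tags whose class
# name is not found in the list are kept with empty 'type'/'species' fields.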
file_list = []
#process by csv file
for i, j in lbl_files.iterrows():
#skip empty files
try:
k = pd.read_csv(j['csv'], header=None)
tags = True
except pd.errors.EmptyDataError:
tags = False
#for each valid csv file process tags
if tags:
for l, m in k.iterrows():
file_out = str(j['wav'])+'.wav'
try:
type_out = sps_list.loc[sps_list['class name'] ==
m[2]].type.values[0]
scient_out = sps_list.loc[sps_list['class name'] ==
m[2]].Scientific_name.values[0]
except IndexError:  # class name not found in the species list
type_out = ''
scient_out = ''
dict_out = dict(zip(['file', 'type', 'class_name', 'species', 'start', 'length'],
[file_out, type_out, m[2], scient_out, m[0],m[1]]))
file_list.append(dict_out)
file_list = | pd.DataFrame(file_list) | pandas.DataFrame |
#!/usr/bin/python
# Note
# This script analyzes duplicated gene sequences from the 1011 Saccharomyces cerevisiae (sce) genomes project.
import os
import pandas as pd
input0 = "/media/luhongzhong/newdisk/Genomics_data/cds_align_unify/"
os.system("mkdir /media/luhongzhong/newdisk/Genomics_data/cds_align_unify_remove_duplicates/")
outfile0 = "/media/luhongzhong/newdisk/Genomics_data/cds_align_unify_remove_duplicates/"
all_gene = os.listdir("/media/luhongzhong/newdisk/Genomics_data/cds_align_unify/")
gene_cluster = []
duplicated_seq_num = []
non_duplicate_seq_num = []
for gene in all_gene:
if ".phy" in gene:
print(gene)
# fasta0 = input0 + "YPR204W.phy"
fasta0 = input0 + gene
s0 = open(fasta0, "r").readlines()
# firstly build the id and seq dict
general_information = s0[0].split(" ")
id_index = []
id = []
for i, s in enumerate(s0):
if " \n" in s:
# print(s)
id.append(s)
id_index.append(i)
id_seq_dict = {}
for j, id0 in enumerate(id):
# print(j)
if j < len(id) - 1:
index_s = id_index[j] + 1
index_e = id_index[j + 1]
seq_choose = s0[index_s:index_e]
else:
index_s = id_index[j] + 1
seq_choose = s0[index_s:]
# print(seq_choose)
id_new = id0.strip("\n")
id_new = id_new.strip(" ")
id_seq_dict[id_new] = seq_choose
# check the duplicated seq
id_all = []
seq_all = []
for key, value in id_seq_dict.items():
value0 = "".join(value)
id_all.append(key)
seq_all.append(value0)
df = pd.DataFrame({"ID": id_all, "seq": seq_all})
duplicate0 = df[df.duplicated(['seq'], keep=False)]
print("total seq:" + str(len(seq_all)) + "====> duplicate_seq:" + str(len(duplicate0["seq"])))
# summarize the duplicated seq
gene_cluster.append(gene)
duplicated_seq_num.append(len(duplicate0["seq"]))
non_duplicate_seq_num.append(len(seq_all) - len(duplicate0["seq"]))  # unique = total sequences in the alignment minus duplicated ones
# save the non duplicate ones
new_df = df.drop_duplicates(subset=['seq'], keep=False)
id_kept = new_df["ID"].tolist()
general_information_new = str(len(id_kept)) + " " + " ".join(general_information[1:])
file_out = outfile0 + gene
out = open(file_out, "w")
out.write(general_information_new)
for ss0 in id_kept:
out.write(ss0 + " " + "\n")
out.writelines("".join(id_seq_dict[ss0]))
out.close()
# further summarize the duplicate seq information
duplicate_seq = | pd.DataFrame({"cluster": gene_cluster, "duplicate_num": duplicated_seq_num, "unique_num": non_duplicate_seq_num}) | pandas.DataFrame |
from collections import defaultdict, OrderedDict, namedtuple
from itertools import groupby
import numpy as np
import pandas as pd
import vigra
import logging
logger = logging.getLogger(__name__)
from .util import label_vol_mapping, edge_mask_for_axis, edge_ids_for_axis, \
unique_edge_labels, extract_edge_values_for_axis, nonzero_coord_array, \
dataframe_to_hdf5, dataframe_from_hdf5
from .accumulators.base import BaseEdgeAccumulator, BaseSpAccumulator
from .accumulators.standard import StandardEdgeAccumulator, StandardSpAccumulator, StandardFlatEdgeAccumulator
from .accumulators.similarity import SimilarityFlatEdgeAccumulator
from .accumulators.edgeregion import EdgeRegionEdgeAccumulator
class Rag(object):
"""
Region Adjacency Graph
Initialized with an ND label image of superpixels, and stores
the edges between superpixels.
+----------------------+------------------------------------------------------------------------------+
| Attribute | Description |
+======================+==============================================================================+
| label_img | The label volume you passed in. |
+----------------------+------------------------------------------------------------------------------+
| sp_ids | 1D ndarray of superpixel ID values, sorted. |
+----------------------+------------------------------------------------------------------------------+
| max_sp | The maximum superpixel ID in the label volume |
+----------------------+------------------------------------------------------------------------------+
| num_sp | The number of superpixels in ``label_img``. |br| |
| | Not necessarily the same as ``max_sp``. |br| |
+----------------------+------------------------------------------------------------------------------+
| num_edges | The number of edges in the label volume. |
+----------------------+------------------------------------------------------------------------------+
| edge_ids | *ndarray, shape=(N,2)* |br| |
| | List of adjacent superpixel IDs, sorted. (No duplicates). |br| |
| | *Guarantee:* For all edge_ids (sp1,sp2): sp1 < sp2. |br| |
+----------------------+------------------------------------------------------------------------------+
| unique_edge_tables | *dict* of *pandas.DataFrame* objects |br| |
| | Columns: ``[sp1, sp2, edge_label]``, where ``edge_label`` |br| |
| | uniquely identifies each edge ``(sp1, sp2)`` within that table. |br| |
| | See :py:attr:`unique_edge_tables` for details. |br| |
+----------------------+------------------------------------------------------------------------------+
| dense_edge_tables | *OrderedDict* of *pandas.DataFrame* objects (one per isotropic axis). |br| |
| | Each DataFrame stores the id and location of all pixel |br| |
| | edge pairs in the volume *along a particular axis.* |br| |
| | See :py:attr:`dense_edge_tables` for details. |br| |
+----------------------+------------------------------------------------------------------------------+
| flat_edge_label_img | *ndarray, same shape as label_img except for the z-axis (1 px smaller)* |br| |
| | If ``flat_superpixels=True``, this is a label volume for edges along |br| |
| | the z-axis, labeled according to the ``edge_label`` column from |br| |
| | :py:attr:`unique_edge_tables['z'] <unique_edge_tables>`. |br| |
+----------------------+------------------------------------------------------------------------------+
**Limitations:**
- This representation does not check for edge contiguity, so if two
superpixels are connected via multiple 'faces', those faces will both
be lumped into one 'edge'.
- No support for parallelization yet.
"""
# Maintenance docs
#
"""
Implementation notes
--------------------
Internally, the edges along each axis are found independently and stored
in separate pandas.DataFrame objects (one per axis in the volume).
Every pixel face between two different superpixels is stored as a separate
row in one of those DataFrames.
This data structure's total RAM usage is proportional to the number of
pixel faces on superpixel boundaries in the volume (i.e. the manhattan
distance of all superpixel boundaries interior to the label volume).
It needs about 23 bytes per pixel face. (Each DataFrame row is 23 bytes.)
Here are some example stats for a typical 512^3 cube of isotropic EM data:
- 7534 superpixels
- 53354 edges between superpixels
- 19926582 (~20 million) individual edge pixel faces
So, to handle that 0.5 GB label volume, this datastructure needs:
20e6 pixel faces * 23 bytes == 0.46 GB of storage.
Obviously, a volume with smaller superpixels will require more storage.
TODO
----
- Adding a function to merge two Rags should be trivial, if it seems useful
(say, for parallelizing construction.)
"""
# Used internally, during initialization
_EdgeData = namedtuple("_EdgeData", "mask mask_coords ids forwardness")
def __init__( self, label_img, flat_superpixels=False ):
"""
Parameters
----------
label_img
*VigraArray* |br|
Label values do not need to be consecutive, but *excessively* high label values
will require extra RAM when computing features, due to zeros stored
within ``RegionFeatureAccumulators``.
flat_superpixels
*bool* |br|
Set to ``True`` if ``label_img`` is a 3D volume whose superpixels are flat in the xy direction.
"""
if isinstance(label_img, str) and label_img == '__will_deserialize__':
return
assert hasattr(label_img, 'axistags'), \
"For optimal performance, make sure label_img is a VigraArray with accurate axistags"
assert set(label_img.axistags.keys()).issubset('zyx'), \
"Only axes z,y,x are permitted, not {}".format( label_img.axistags.keys() )
assert label_img.dtype == np.uint32, \
"label_img must have dtype uint32"
assert not flat_superpixels or set('zyx').issubset(set(label_img.axistags.keys())), \
"Can't use flat_superpixels with a 2D image."
axes = 'zyx'[-label_img.ndim:]
self._label_img = label_img.withAxes(axes)
self._flat_superpixels = flat_superpixels
edge_datas = OrderedDict()
for axis, axiskey in enumerate(label_img.axistags.keys()):
if flat_superpixels and axiskey == 'z':
edge_mask = None # edge_ids_for_axis() supports edge_mask=None
edge_mask_coords = None
else:
edge_mask = edge_mask_for_axis(label_img, axis)
edge_mask_coords = nonzero_coord_array(edge_mask).transpose()
# Save RAM: Convert to the smallest dtype we can get away with.
if (np.array(label_img.shape) < 2**16).all():
edge_mask_coords = edge_mask_coords.astype(np.uint16)
else:
edge_mask_coords = edge_mask_coords.astype(np.uint32)
edge_ids = edge_ids_for_axis(label_img, edge_mask, axis)
edge_forwardness = edge_ids[:,0] < edge_ids[:,1]
edge_ids.sort()
edge_datas[axiskey] = Rag._EdgeData(edge_mask, edge_mask_coords, edge_ids, edge_forwardness)
self._init_unique_edge_tables(edge_datas)
self._init_dense_edge_tables(edge_datas)
self._init_edge_ids()
self._init_sp_attributes()
if flat_superpixels:
self._init_flat_edge_label_img(edge_datas)
@property
def label_img(self):
return self._label_img
@property
def flat_superpixels(self):
return self._flat_superpixels
@property
def sp_ids(self):
return self._sp_ids
@property
def num_sp(self):
return self._num_sp
@property
def max_sp(self):
return self._max_sp
@property
def num_edges(self):
all_axes = ''.join(self._label_img.axistags.keys())
return len(self._unique_edge_tables[all_axes])
@property
def edge_ids(self):
return self._edge_ids
@property
def flat_edge_label_img(self):
if self._flat_superpixels:
return self._flat_edge_label_img
return None
@property
def unique_edge_tables(self):
"""
*OrderedDict* of *pandas.DataFrame* objects.
Each of these tables represents the set of edges that lie along a particular set of axes.
If ``flat_superpixels=False``, then this dict contains just one item,
with key ``zyx`` or ``yx``, depending on whether or not ``label_img`` is 3D or 2D.
If ``flat_superpixels=True``, then this dict contains two tables for the disjoint
sets of ``yx`` edges and ``z`` edges. And additionally, it contains a third table in
key ``zyx`` with all edges in the Rag (i.e. the superset of edges ``z``and ``yx``).
Each table has columns: ``[sp1, sp2, edge_label]``, where ``edge_label``
uniquely identifies each edge ``(sp1, sp2)`` within *that table*.
.. note::
Each table has an independent ``edge_label`` column. For a given edge
``(sp1,sp2)``, ``edge_label`` in table ``yx`` will not match the edge_label
in table ``zyx``.
"""
return self._unique_edge_tables
@property
def dense_edge_tables(self):
"""
Read-only property. |br|
A list of ``pandas.DataFrame`` objects (one per image axis). |br|
Each DataFrame stores the location and superpixel ids of all pixelwise |br|
edge pairs in the volume *along a particular axis.* |br|
**Example:**
+---------+---------+-----------------+----------------+--------+--------+--------+
| ``sp1`` | ``sp2`` | ``forwardness`` | ``edge_label`` | ``z`` | ``y`` | ``x`` |
+=========+=========+=================+================+========+========+========+
| 1 | 2 | True | 10 | 0 | 10 | 13 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| 1 | 2 | False | 10 | 0 | 10 | 14 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| 1 | 2 | False | 10 | 0 | 10 | 15 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| 1 | 3 | True | 11 | 1 | 20 | 42 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| 1 | 3 | True | 11 | 1 | 20 | 43 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| 1 | 3 | False | 11 | 1 | 20 | 44 |
+---------+---------+-----------------+----------------+--------+--------+--------+
| ... | ... | ... | ... | ... | ... | ... |
+---------+---------+-----------------+----------------+--------+--------+--------+
**Column definitions:**
+-----------------+----------------------------------------------------------------------------------------+
| Column | Description |
+=================+========================================================================================+
| ``sp1`` | Superpixel ID |
+-----------------+----------------------------------------------------------------------------------------+
| ``sp2`` | Superpixel ID. *Guarantee:* ``(sp1 < sp2)`` |
+-----------------+----------------------------------------------------------------------------------------+
| ``forwardness`` | ``True`` if ``sp1`` was on the "left" (or "upper", etc.) side of the edge. |
+-----------------+----------------------------------------------------------------------------------------+
| ``edge_label`` | A ``uint32`` that uniquely identifies this ``(sp1,sp2)`` pair, regardless of axis. |
+-----------------+----------------------------------------------------------------------------------------+
| ``z`` | Z-coordinate of this pixel edge |
+-----------------+----------------------------------------------------------------------------------------+
| ``y`` | Y-coordinate of this pixel edge |
+-----------------+----------------------------------------------------------------------------------------+
| ``x`` | X-coordinate of this pixel edge |
+-----------------+----------------------------------------------------------------------------------------+
"""
return self._dense_edge_tables
def _init_unique_edge_tables(self, edge_datas):
"""
Initialize the edge_label_lookup_df attribute.
"""
all_axes = ''.join(self._label_img.axistags.keys())
all_edge_ids = [t.ids for t in edge_datas.values()]
self._unique_edge_tables = {}
if not self._flat_superpixels:
self._unique_edge_tables[all_axes] = unique_edge_labels( all_edge_ids )
else:
assert len(all_edge_ids) == 3
assert list(edge_datas.keys()) == list('zyx')
unique_z = unique_edge_labels( [all_edge_ids[0]] )
unique_yx = unique_edge_labels( all_edge_ids[1:] )
unique_zyx = unique_edge_labels( [ unique_z[['sp1', 'sp2']].values,
unique_yx[['sp1', 'sp2']].values ] )
# If the superpixels are really flat, then unique_yx and unique_z
# should be completely disjoint.
assert len(unique_zyx) == ( len(unique_z) + len(unique_yx) )
self._unique_edge_tables['z'] = unique_z
self._unique_edge_tables['yx'] = unique_yx
self._unique_edge_tables['zyx'] = unique_zyx
def _init_edge_ids(self):
# Tiny optimization:
# Users will be accessing Rag.edge_ids over and over, so let's
# cache them now instead of extracting them on-the-fly
all_axes = ''.join(self._label_img.axistags.keys())
self._edge_ids = self._unique_edge_tables[all_axes][['sp1', 'sp2']].values
def _init_flat_edge_label_img(self, edge_datas):
assert self._flat_superpixels
unique_table_z = self.unique_edge_tables['z']
assert list(unique_table_z.columns.values) == ['sp1', 'sp2', 'edge_label']
dense_table_z = pd.DataFrame(edge_datas['z'].ids, columns=['sp1', 'sp2'])
dense_table_with_labels = pd.merge(dense_table_z, unique_table_z, on=['sp1', 'sp2'], how='left', copy=False)
flat_edge_label_img = dense_table_with_labels['edge_label'].values
shape = np.subtract(self._label_img.shape, (1, 0, 0))
flat_edge_label_img.shape = tuple(shape)
assert list(self._label_img.axistags.keys()) == list('zyx')
self._flat_edge_label_img = vigra.taggedView(flat_edge_label_img, 'zyx')
def _init_dense_edge_tables(self, edge_datas):
"""
Construct the N dense_edge_tables (one for each axis)
"""
if self._flat_superpixels:
dense_axes = 'yx'
else:
dense_axes = ''.join(self._label_img.axistags.keys())
# Now create an dense_edge_table for each axis
self._dense_edge_tables = OrderedDict()
coord_cols = list(self._label_img.axistags.keys())
column_labels = ['sp1', 'sp2', 'forwardness', 'edge_label'] + coord_cols
column_default_dtypes = [np.uint32, np.uint32, bool, np.uint32] + [np.uint16 for _ in coord_cols]
for axiskey in dense_axes:
edge_data = edge_datas[axiskey]
n_edges = len(edge_data.ids)
if n_edges == 0:
self._dense_edge_tables[axiskey] = pd.DataFrame(
{cname: | pd.Series(dtype=dt) | pandas.Series |
"""Main optimisation functions.
"""
from .data_fetcher import get_oa_centroids, get_oa_stats
from .utils import coverage_matrix, make_job_dict
from spineq.greedy import greedy_opt
import numpy as np
import pandas as pd
import rq
from flask_socketio import SocketIO
import os
import datetime
import json
def optimise(
lad20cd="E08000021",
n_sensors=20,
theta=500,
population_weight=1,
workplace_weight=0,
pop_age_groups={
"pop_total": {"min": 0, "max": 90, "weight": 1},
"pop_children": {"min": 0, "max": 16, "weight": 0},
"pop_elderly": {"min": 70, "max": 90, "weight": 0},
},
rq_job=False,
socket=False,
redis_url="redis://",
save_result=False,
save_plot=False,
save_dir="",
run_name="",
**kwargs
):
"""Greedily place sensors to maximise coverage.
Keyword Arguments:
lad20cd {str} -- 2020 local authority district code to get output areas for (
default E08000021, which is Newcastle upon Tyne)
n_sensors {int} -- number of sensors to place (default: {20})
theta {float} -- coverage decay rate (default: {500})
population_weight, workplace_weight, pop_age_groups -- As defined in
calc_oa_weights (parameters directly passed to that function.)
(all passed to cala_oa_weights)
rq_job {boolean} -- If True attempt to get the RQ job running this
function and upate meta data with progress.
socket {boolean} -- If True attempt to make a SocketIO connection to
send updates to.
redis_url {str} -- URL of Redis server for SocketIO message queue
(default: {"redis://"})
save_result {boolean} -- If True save a json of optimisation results to
file {save_dir}/{run_name}_result.json {default: {False}}
save_plot {str} -- If 'final' save a plot of the final sensor network,
if 'all' save plot after placing each sensor, if False save no plots.
{default: {False}}
save_dir {str} -- Directory to save plots in.
Defaults to current directory. {default: {""}}
run_name {str} -- Prefix to add to saved plots. If empty defaults to
current date and time YYYYMMDDhhmm {default: {""}}
**kwargs -- additional arguments to pass to the plotting function.
Returns:
dict -- optimisation result.
"""
if rq_job or socket:
print("Setting up jobs and sockets...")
if rq_job:
job = rq.get_current_job()
print("rq_job", rq_job)
print("job", job)
else:
job = None
if socket:
socketIO = SocketIO(message_queue=redis_url)
print("socket", socket)
print("socketIO", socketIO)
else:
socketIO = None
print("Fetching data...")
if job:
job.meta["status"] = "Fetching data"
job.save_meta()
data = get_optimisation_inputs(
lad20cd=lad20cd,
population_weight=population_weight,
workplace_weight=workplace_weight,
pop_age_groups=pop_age_groups,
combine=True,
)
oa_x = data["oa_x"]
oa_y = data["oa_y"]
oa_weight = data["oa_weight"]
oa11cd = data["oa11cd"]
# Compute coverage matrix: coverage at each OA due to a sensor placed at
# any other OA.
coverage = coverage_matrix(oa_x, oa_y, theta=theta)
# Run the optimisation
result = greedy_opt(
n_sensors=n_sensors,
coverage=coverage,
weights=oa_weight,
job=job,
socketIO=socketIO,
)
result = make_result_dict(
lad20cd,
n_sensors,
theta,
oa_x,
oa_y,
oa11cd,
result["sensors"],
result["total_coverage"],
result["point_coverage"],
list(oa11cd[result["placement_history"]]),
result["coverage_history"],
oa_weight=result["weights"],
pop_age_groups=pop_age_groups,
population_weight=population_weight,
workplace_weight=workplace_weight,
)
if job:
job.meta["progress"] = 100
job.meta["status"] = "Finished"
job.save_meta()
if socket:
jobDict = make_job_dict(job)
jobDict["result"] = result
socketIO.emit("jobFinished", jobDict)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
if not run_name:
now = datetime.datetime.now()
run_name = now.strftime("%Y%m%d%H%M")
if save_plot:
from .plotting import plot_optimisation_result
save_path = "{}/{}_nsensors_{:03d}.png".format(save_dir, run_name, n_sensors)
plot_optimisation_result(result, save_path=save_path, **kwargs)
if save_result:
result_file = "{}/{}_result.json".format(save_dir, run_name)
with open(result_file, "w") as f:
json.dump(result, f, indent=4)
return result
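# Hedged example invocation (all argument values are illustrative only):
# result = optimise(lad20cd="E08000021", n_sensors=10, theta=500,
# population_weight=1, workplace_weight=0,
# save_result=True, save_dir="results", run_name="demo")
# The returned dict is whatever make_result_dict assembles above (sensor locations,
# coverage history, weights and the run configuration).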
def calc_oa_weights(
lad20cd="E08000021",
population_weight=1,
workplace_weight=0,
pop_age_groups={
"pop_total": {"min": 0, "max": 90, "weight": 1},
"pop_children": {"min": 0, "max": 16, "weight": 0},
"pop_elderly": {"min": 70, "max": 90, "weight": 0},
},
combine=True,
):
"""Calculate weighting factor for each OA.
Keyword Arguments:
lad20cd {str} -- 2020 local authority district code to get output areas for (
default E08000021, which is Newcastle upon Tyne)
population_weight {float} -- Weighting for residential population
(default: {1})
workplace_weight {float} -- Weighting for workplace population
(default: {0})
pop_age_groups {dict} -- Residential population age groups to create
objectives for and their corresponding weights. Dict with objective
name as key. Each entry should be another dict with keys min (min age
in population group), max (max age in group), and weight (objective
weight for this group).
combine {bool} -- If True combine all the objectives weights into a
single overall weight using the defined weighting factors. If False
treat all objectives separately, in which case all weights defined in
other parameters are ignored.
Returns:
pd.DataFrame or pd.Series -- Weight for each OA (indexed by oa11cd) for
each objective. Series if only one objective defined or combine is True.
"""
data = get_oa_stats(lad20cd=lad20cd)
population_ages = data["population_ages"]
workplace = data["workplace"]
if len(population_ages) != len(workplace):
raise ValueError(
"Lengths of inputs don't match: population_ages={}, workplace={}".format(
len(population_ages), len(workplace)
)
)
# weightings for residential population by age group
if population_weight > 0:
oa_population_group_weights = {}
for name, group in pop_age_groups.items():
# skip calculation for zeroed objectives
if group["weight"] == 0:
continue
# get sum of population in group age range
group_population = population_ages.loc[
:,
(population_ages.columns >= group["min"])
& (population_ages.columns <= group["max"]),
].sum(axis=1)
# normalise total population
group_population = group_population / group_population.sum()
# if objectives will be combined, scale by group weight
if combine:
group_population = group_population * group["weight"]
oa_population_group_weights[name] = group_population
if len(oa_population_group_weights) > 0:
use_population = True # some population groups with non-zero weights
oa_population_group_weights = | pd.DataFrame(oa_population_group_weights) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)  # store the resolved path, not a boolean
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
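# Hedged example (matching the default format string above):
# TransformMetaData.getDateParser()('2020-06-09 19:14:00.000')
# # -> datetime.datetime(2020, 6, 9, 19, 14)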
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='ignore')  # Convert numeric columns; leave string columns untouched
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": | pandas.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import MinMaxScaler
# Set train file names
train_queries_file = "./dataset/train_queries.csv"
train_plans_file = "./dataset/train_plans.csv"
train_click_file= "./dataset/train_clicks.csv"
profiles_file = "./dataset/profiles.csv"
# Set test file names
test_queries_file = "./dataset/test_queries.csv"
test_plans_file = "./dataset/test_plans.csv"
def load_prepare_data():
# Load training data
train_queries = pd.read_csv(train_queries_file)
train_plans = pd.read_csv(train_plans_file)
train_click = pd.read_csv(train_click_file)
# Load testing data
test_queries = pd.read_csv(test_queries_file)
test_plans = pd.read_csv(test_plans_file)
# Prepare train data
train_data = train_queries.merge(train_click, on='sid', how='left')
train_data = train_data.merge(train_plans, on='sid', how='left')
test_data = test_queries.merge(test_plans, on='sid', how='left')
return train_data, test_data
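# Illustrative sketch (toy frames, made-up values) of the merge pattern used in
# load_prepare_data(): queries are left-joined with clicks and plans on the
# session id 'sid', so sessions without a click keep a NaN click_mode.
def _demo_merge_on_sid():
    queries = pd.DataFrame({'sid': [1, 2], 'pid': [10, 20]})
    clicks = pd.DataFrame({'sid': [1], 'click_mode': [3]})
    plans = pd.DataFrame({'sid': [1, 2], 'plans': ['[]', '[]']})
    merged = queries.merge(clicks, on='sid', how='left')
    merged = merged.merge(plans, on='sid', how='left')
    return merged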
def preprocess_features(train_data, test_data):
train_data = train_data.drop(['click_time'], axis=1)
train_data['click_mode'] = train_data['click_mode'].fillna(0)
test_data['click_mode'] = -1
# concat train and test sets
all_data = pd.concat([train_data, test_data], axis=0, sort=True)
all_data = all_data.drop(['plan_time'], axis=1)
all_data = all_data.reset_index(drop=True)
# Prepare OD features by spliting coordinates for each of them
all_data['o_first'] = all_data['o'].apply(lambda od: float(od.split(',')[0]))
all_data['o_second'] = all_data['o'].apply(lambda od: float(od.split(',')[1]))
all_data['d_first'] = all_data['d'].apply(lambda od: float(od.split(',')[0]))
all_data['d_second'] = all_data['d'].apply(lambda od: float(od.split(',')[1]))
all_data = all_data.drop(['o', 'd'], axis=1)
all_data['req_time'] = pd.to_datetime(all_data['req_time'])
all_data['reqweekday'] = all_data['req_time'].dt.dayofweek
all_data['reqhour'] = all_data['req_time'].dt.hour
all_data = all_data.drop(['req_time'], axis=1)
return all_data
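# Small sketch (toy row) of the feature extraction in preprocess_features():
# the 'o'/'d' strings are "lng,lat" pairs split into float columns, and the
# request time is decomposed into weekday and hour.
def _demo_od_and_time_features():
    demo = pd.DataFrame({'o': ['116.30,39.90'], 'd': ['116.40,40.00'],
                         'req_time': ['2018-10-01 08:30:00']})
    demo['o_first'] = demo['o'].apply(lambda od: float(od.split(',')[0]))
    demo['o_second'] = demo['o'].apply(lambda od: float(od.split(',')[1]))
    demo['req_time'] = pd.to_datetime(demo['req_time'])
    demo['reqweekday'] = demo['req_time'].dt.dayofweek
    demo['reqhour'] = demo['req_time'].dt.hour
    return demo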
def generate_plan_features(all_data):
n = all_data.shape[0]
mode_list_feasible = np.zeros((n, 12))
max_distance, min_distance, mean_distance, std_distance = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_min_distance, mode_max_distance, mode_min_price, mode_max_price, mode_min_eta, mode_max_eta, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
for i, plan in tqdm(enumerate(all_data['plans'].values)):
try:
plan_list = json.loads(plan)
        except (TypeError, ValueError):
plan_list = []
if len(plan_list) == 0:
mode_list_feasible[i, 0] = 1
first_mode[i] = 0
max_distance[i] = -1
min_distance[i] = -1
mean_distance[i] = -1
std_distance[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
mode_min_distance[i] = -1
mode_max_distance[i] = -1
mode_min_price[i] = -1
mode_max_price[i] = -1
mode_min_eta[i] = -1
mode_max_eta[i] = -1
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feasible[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_distance[i] = distance_list[distance_sort_idx[-1]]
min_distance[i] = distance_list[distance_sort_idx[0]]
mean_distance[i] = np.mean(distance_list)
std_distance[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
mode_max_distance[i] = mode_list[distance_sort_idx[-1]]
mode_min_distance[i] = mode_list[distance_sort_idx[0]]
mode_max_price[i] = mode_list[price_sort_idx[-1]]
mode_min_price[i] = mode_list[price_sort_idx[0]]
mode_max_eta[i] = mode_list[eta_sort_idx[-1]]
mode_min_eta[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feasible)
feature_data.columns = ['mode_feasible_{}'.format(i) for i in range(12)]
feature_data['max_distance'] = max_distance
feature_data['min_distance'] = min_distance
feature_data['mean_distance'] = mean_distance
feature_data['std_distance'] = std_distance
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['mode_max_distance'] = mode_max_distance
feature_data['mode_min_distance'] = mode_min_distance
feature_data['mode_max_price'] = mode_max_price
feature_data['mode_min_price'] = mode_min_price
feature_data['mode_max_eta'] = mode_max_eta
feature_data['mode_min_eta'] = mode_min_eta
feature_data['first_mode'] = first_mode
all_data = pd.concat([all_data, feature_data], axis=1)
all_data = all_data.drop(['plans'], axis=1)
return all_data
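# Sketch of how a single 'plans' JSON string is summarised by
# generate_plan_features(); the toy plan list below is made up but uses the
# same field names as the loop above.
def _demo_plan_summary():
    plan_json = ('[{"distance": 3220, "price": 300, "eta": 1080, "transport_mode": 1}, '
                 '{"distance": 3300, "price": "", "eta": 900, "transport_mode": 3}]')
    plans = json.loads(plan_json)
    distances = np.array([int(p['distance']) for p in plans])
    prices = np.array([0 if p['price'] == '' else int(p['price']) for p in plans])
    etas = np.array([int(p['eta']) for p in plans])
    modes = np.array([int(p['transport_mode']) for p in plans])
    first_mode = modes[0]
    mode_min_eta = modes[np.argsort(etas)[0]]
    return distances.max(), prices.mean(), first_mode, mode_min_eta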
def read_profile_data():
profile_data = pd.read_csv(profiles_file)
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_na.columns = profile_data.columns
    profile_data = pd.concat([profile_data, profile_na], ignore_index=True)
return profile_data
def generate_profile_features(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=42)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_attribute_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
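# Minimal sketch of the profile compression above: TruncatedSVD projects the
# sparse 0/1 profile attributes onto a small number of dense components. The
# toy matrix shape and component count below are arbitrary.
def _demo_profile_svd():
    toy_profiles = np.random.randint(0, 2, size=(50, 66))
    svd = TruncatedSVD(n_components=5, n_iter=10, random_state=42)
    return svd.fit_transform(toy_profiles)  # shape (50, 5)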
def split_train_test(data):
train_data = data[data['click_mode'] != -1]
test_data = data[data['click_mode'] == -1]
submit = test_data[['sid']].copy()
train_data = train_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['click_mode'], axis=1)
train_y = train_data['click_mode'].values
train_x = train_data.drop(['click_mode'], axis=1)
return train_x, train_y, test_data, submit
def save_data(trainX, y_train, testX, y_test):
trainX.to_csv('preprocess_data/train.csv',index = False)
testX.to_csv('preprocess_data/test.csv',index = False)
y_train = pd.DataFrame({'click_mode': y_train})
y_train.to_csv('preprocess_data/train_label.csv',index = False)
y_test.to_csv('preprocess_data/test_label.csv',index = False)
def load_data():
trainX = pd.read_csv('preprocess_data/train.csv')
testX = pd.read_csv('preprocess_data/test.csv')
y_train = pd.read_csv('preprocess_data/train_label.csv')
y_test = pd.read_csv('preprocess_data/test_label.csv')
return trainX, y_train, testX, y_test
def build_norm_context(trainX, testX):
trainX = np.array(trainX)
context_input = trainX[:,:37]
user_input = trainX[:,37:]
testX = np.array(testX)
context_input_test = testX[:,:37]
user_input_test = testX[:,37:]
scaler = MinMaxScaler()
scaler.fit(context_input)
# apply transform
normalized_train = scaler.transform(context_input)
normalized_test = scaler.transform(context_input_test)
    normalized_train = pd.DataFrame(normalized_train)
    user_input = pd.DataFrame(user_input)
    merged_train = pd.concat([normalized_train, user_input], axis=1)
    normalized_test = pd.DataFrame(normalized_test)
    user_input_test = pd.DataFrame(user_input_test)
merged_test = | pd.concat([normalized_test, user_input_test], axis=1) | pandas.concat |
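# Sketch of the scaling split in build_norm_context(): the MinMaxScaler is fit
# on the training context columns only and then applied to the test context
# columns, while the SVD-based user features are passed through unscaled.
# The toy arrays below stand in for the real context matrices.
def _demo_context_scaling():
    train_context = np.array([[10.0, 200.0], [20.0, 400.0]])
    test_context = np.array([[15.0, 300.0]])
    scaler = MinMaxScaler()
    scaler.fit(train_context)
    return scaler.transform(train_context), scaler.transform(test_context)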
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
| assert_series_equal(result, s[3:]) | pandas.util.testing.assert_series_equal |
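# Quick illustration of the behaviour exercised by
# test_getitem_setitem_slice_bug above: slicing a Series past its bounds clips
# to the available positions instead of raising, mirroring plain list slicing.
def _demo_out_of_bounds_slice():
    demo = Series(range(10))
    return demo[-12:].equals(demo), demo[-7:].equals(demo[3:])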
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2011-2012 Lambda Foundry, Inc. and PyData Development Team
# Copyright (c) 2013 <NAME> and contributors
# Copyright (c) 2014-2015 <NAME> <<EMAIL>>
# Copyright (c) 2014-2016 <NAME> "wave++" <<EMAIL>>
# Copyright (c) 2014- Spyder Project Contributors
#
# Components of gtabview originally distributed under the MIT (Expat) license.
# This file as a whole distributed under the terms of the New BSD License
# (BSD 3-clause; see NOTICE.txt in the Spyder root directory for details).
# -----------------------------------------------------------------------------
"""
Pandas DataFrame Editor Dialog.
DataFrameModel is based on the class ArrayModel from array editor
and the class DataFrameModel from the pandas project.
Present in pandas.sandbox.qtpandas in v0.13.1.
DataFrameHeaderModel and DataFrameLevelModel are based on the classes
Header4ExtModel and Level4ExtModel from the gtabview project.
DataFrameModel is based on the classes ExtDataModel and ExtFrameModel, and
DataFrameEditor is based on gtExtTableView from the same project.
DataFrameModel originally based on pandas/sandbox/qtpandas.py of the
`pandas project <https://github.com/pandas-dev/pandas>`_.
The current version is qtpandas/models/DataFrameModel.py of the
`QtPandas project <https://github.com/draperjames/qtpandas>`_.
Components of gtabview from gtabview/viewer.py and gtabview/models.py of the
`gtabview project <https://github.com/TabViewer/gtabview>`_.
"""
# Standard library imports
import time
# Third party imports
from qtpy.compat import from_qvariant, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QModelIndex, Qt, Signal, Slot,
QItemSelectionModel, QEvent)
from qtpy.QtGui import QColor, QCursor
from qtpy.QtWidgets import (QApplication, QCheckBox, QDialog, QGridLayout,
QHBoxLayout, QInputDialog, QLineEdit, QMenu,
QMessageBox, QPushButton, QTableView,
QScrollBar, QTableWidget, QFrame,
QItemDelegate)
from pandas import DataFrame, Index, Series, isna
try:
from pandas._libs.tslib import OutOfBoundsDatetime
except ImportError: # For pandas version < 0.20
from pandas.tslib import OutOfBoundsDatetime
import numpy as np
# Local imports
from spyder.config.base import _
from spyder.config.fonts import DEFAULT_SMALL_DELTA
from spyder.config.gui import get_font, config_shortcut
from spyder.py3compat import (io, is_text_string, is_type_text_string, PY2,
to_text_string)
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import (add_actions, create_action,
keybinding, qapplication)
from spyder.plugins.variableexplorer.widgets.arrayeditor import get_idx_rect
# Supported Numbers and complex numbers
REAL_NUMBER_TYPES = (float, int, np.int64, np.int32)
COMPLEX_NUMBER_TYPES = (complex, np.complex64, np.complex128)
# Strings treated as a false boolean, since bool('False') would otherwise return True
_bool_false = ['false', 'f', '0', '0.', '0.0', ' ']
# Default format for data frames with floats
DEFAULT_FORMAT = '%.6g'
# Limit at which dataframe is considered so large that it is loaded on demand
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
ROWS_TO_LOAD = 500
COLS_TO_LOAD = 40
# Background colours
BACKGROUND_NUMBER_MINHUE = 0.66 # hue for largest number
BACKGROUND_NUMBER_HUERANGE = 0.33 # (hue for smallest) minus (hue for largest)
BACKGROUND_NUMBER_SATURATION = 0.7
BACKGROUND_NUMBER_VALUE = 1.0
BACKGROUND_NUMBER_ALPHA = 0.6
BACKGROUND_NONNUMBER_COLOR = Qt.lightGray
BACKGROUND_INDEX_ALPHA = 0.8
BACKGROUND_STRING_ALPHA = 0.05
BACKGROUND_MISC_ALPHA = 0.3
def bool_false_check(value):
"""
Used to convert bool entrance to false.
    Needed since bool() of any non-empty string returns True.
"""
if value.lower() in _bool_false:
value = ''
return value
def global_max(col_vals, index):
"""Returns the global maximum and minimum."""
col_vals_without_None = [x for x in col_vals if x is not None]
max_col, min_col = zip(*col_vals_without_None)
return max(max_col), min(min_col)
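# Tiny illustration of global_max(): given per-column [max, min] pairs (None
# for non-numeric columns), it returns the overall maximum and minimum used
# when column-wise colouring is disabled. The values below are made up.
def _demo_global_max():
    return global_max([[9.0, 1.0], None, [4.0, -2.0]], index=None)  # -> (9.0, -2.0)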
class DataFrameModel(QAbstractTableModel):
""" DataFrame Table Model.
Partly based in ExtDataModel and ExtFrameModel classes
of the gtabview project.
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/models.py
"""
def __init__(self, dataFrame, format=DEFAULT_FORMAT, parent=None):
QAbstractTableModel.__init__(self)
self.dialog = parent
self.df = dataFrame
self.df_index = dataFrame.index.tolist()
self.df_header = dataFrame.columns.tolist()
self._format = format
self.complex_intran = None
self.display_error_idxs = []
self.total_rows = self.df.shape[0]
self.total_cols = self.df.shape[1]
size = self.total_rows * self.total_cols
self.max_min_col = None
if size < LARGE_SIZE:
self.max_min_col_update()
self.colum_avg_enabled = True
self.bgcolor_enabled = True
self.colum_avg(1)
else:
self.colum_avg_enabled = False
self.bgcolor_enabled = False
self.colum_avg(0)
# Use paging when the total size, number of rows or number of
# columns is too large
if size > LARGE_SIZE:
self.rows_loaded = ROWS_TO_LOAD
self.cols_loaded = COLS_TO_LOAD
else:
if self.total_rows > LARGE_NROWS:
self.rows_loaded = ROWS_TO_LOAD
else:
self.rows_loaded = self.total_rows
if self.total_cols > LARGE_COLS:
self.cols_loaded = COLS_TO_LOAD
else:
self.cols_loaded = self.total_cols
def _axis(self, axis):
"""
Return the corresponding labels taking into account the axis.
The axis could be horizontal (0) or vertical (1).
"""
return self.df.columns if axis == 0 else self.df.index
def _axis_levels(self, axis):
"""
Return the number of levels in the labels taking into account the axis.
Get the number of levels for the columns (0) or rows (1).
"""
ax = self._axis(axis)
return 1 if not hasattr(ax, 'levels') else len(ax.levels)
@property
def shape(self):
"""Return the shape of the dataframe."""
return self.df.shape
@property
def header_shape(self):
"""Return the levels for the columns and rows of the dataframe."""
return (self._axis_levels(0), self._axis_levels(1))
@property
def chunk_size(self):
"""Return the max value of the dimensions of the dataframe."""
        return max(*self.shape)  # shape is a property, not a callable
def header(self, axis, x, level=0):
"""
Return the values of the labels for the header of columns or rows.
The value corresponds to the header of column or row x in the
given level.
"""
ax = self._axis(axis)
return ax.values[x] if not hasattr(ax, 'levels') \
else ax.values[x][level]
def name(self, axis, level):
"""Return the labels of the levels if any."""
ax = self._axis(axis)
if hasattr(ax, 'levels'):
return ax.names[level]
if ax.name:
return ax.name
def max_min_col_update(self):
"""
Determines the maximum and minimum number in each column.
The result is a list whose k-th entry is [vmax, vmin], where vmax and
vmin denote the maximum and minimum of the k-th column (ignoring NaN).
This list is stored in self.max_min_col.
If the k-th column has a non-numerical dtype, then the k-th entry
is set to None. If the dtype is complex, then compute the maximum and
minimum of the absolute values. If vmax equals vmin, then vmin is
decreased by one.
"""
if self.df.shape[0] == 0: # If no rows to compute max/min then return
return
self.max_min_col = []
for dummy, col in self.df.iteritems():
if col.dtype in REAL_NUMBER_TYPES + COMPLEX_NUMBER_TYPES:
if col.dtype in REAL_NUMBER_TYPES:
vmax = col.max(skipna=True)
vmin = col.min(skipna=True)
else:
vmax = col.abs().max(skipna=True)
vmin = col.abs().min(skipna=True)
if vmax != vmin:
max_min = [vmax, vmin]
else:
max_min = [vmax, vmin - 1]
else:
max_min = None
self.max_min_col.append(max_min)
def get_format(self):
"""Return current format"""
# Avoid accessing the private attribute _format from outside
return self._format
def set_format(self, format):
"""Change display format"""
self._format = format
self.reset()
def bgcolor(self, state):
"""Toggle backgroundcolor"""
self.bgcolor_enabled = state > 0
self.reset()
def colum_avg(self, state):
"""Toggle backgroundcolor"""
self.colum_avg_enabled = state > 0
if self.colum_avg_enabled:
self.return_max = lambda col_vals, index: col_vals[index]
else:
self.return_max = global_max
self.reset()
def get_bgcolor(self, index):
"""Background color depending on value."""
column = index.column()
if not self.bgcolor_enabled:
return
value = self.get_value(index.row(), column)
if self.max_min_col[column] is None or isna(value):
color = QColor(BACKGROUND_NONNUMBER_COLOR)
if is_text_string(value):
color.setAlphaF(BACKGROUND_STRING_ALPHA)
else:
color.setAlphaF(BACKGROUND_MISC_ALPHA)
else:
if isinstance(value, COMPLEX_NUMBER_TYPES):
color_func = abs
else:
color_func = float
vmax, vmin = self.return_max(self.max_min_col, column)
hue = (BACKGROUND_NUMBER_MINHUE + BACKGROUND_NUMBER_HUERANGE *
(vmax - color_func(value)) / (vmax - vmin))
hue = float(abs(hue))
if hue > 1:
hue = 1
color = QColor.fromHsvF(hue, BACKGROUND_NUMBER_SATURATION,
BACKGROUND_NUMBER_VALUE,
BACKGROUND_NUMBER_ALPHA)
return color
def get_value(self, row, column):
"""Return the value of the DataFrame."""
# To increase the performance iat is used but that requires error
# handling, so fallback uses iloc
try:
value = self.df.iat[row, column]
except OutOfBoundsDatetime:
value = self.df.iloc[:, column].astype(str).iat[row]
except:
value = self.df.iloc[row, column]
return value
def update_df_index(self):
""""Update the DataFrame index"""
self.df_index = self.df.index.tolist()
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
if role == Qt.DisplayRole or role == Qt.EditRole:
column = index.column()
row = index.row()
value = self.get_value(row, column)
if isinstance(value, float):
try:
return to_qvariant(self._format % value)
except (ValueError, TypeError):
# may happen if format = '%d' and value = NaN;
# see issue 4139
return to_qvariant(DEFAULT_FORMAT % value)
elif is_type_text_string(value):
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
return value
else:
try:
return to_qvariant(to_text_string(value))
except Exception:
self.display_error_idxs.append(index)
return u'Display Error!'
elif role == Qt.BackgroundColorRole:
return to_qvariant(self.get_bgcolor(index))
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
elif role == Qt.ToolTipRole:
if index in self.display_error_idxs:
return _("It is not possible to display this value because\n"
"an error ocurred while trying to do it")
return to_qvariant()
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method"""
if self.complex_intran is not None:
if self.complex_intran.any(axis=0).iloc[column]:
QMessageBox.critical(self.dialog, "Error",
"TypeError error: no ordering "
"relation is defined for complex numbers")
return False
try:
ascending = order == Qt.AscendingOrder
if column >= 0:
try:
self.df.sort_values(by=self.df.columns[column],
ascending=ascending, inplace=True,
kind='mergesort')
except AttributeError:
# for pandas version < 0.17
self.df.sort(columns=self.df.columns[column],
ascending=ascending, inplace=True,
kind='mergesort')
except ValueError as e:
# Not possible to sort on duplicate columns #5225
QMessageBox.critical(self.dialog, "Error",
"ValueError: %s" % to_text_string(e))
except SystemError as e:
# Not possible to sort on category dtypes #5361
QMessageBox.critical(self.dialog, "Error",
"SystemError: %s" % to_text_string(e))
self.update_df_index()
else:
# To sort by index
self.df.sort_index(inplace=True, ascending=ascending)
self.update_df_index()
except TypeError as e:
QMessageBox.critical(self.dialog, "Error",
"TypeError error: %s" % str(e))
return False
self.reset()
return True
def flags(self, index):
"""Set flags"""
return Qt.ItemFlags(QAbstractTableModel.flags(self, index) |
Qt.ItemIsEditable)
def setData(self, index, value, role=Qt.EditRole, change_type=None):
"""Cell content change"""
column = index.column()
row = index.row()
if index in self.display_error_idxs:
return False
if change_type is not None:
try:
value = self.data(index, role=Qt.DisplayRole)
val = from_qvariant(value, str)
if change_type is bool:
val = bool_false_check(val)
self.df.iloc[row, column] = change_type(val)
except ValueError:
self.df.iloc[row, column] = change_type('0')
else:
val = from_qvariant(value, str)
current_value = self.get_value(row, column)
if isinstance(current_value, (bool, np.bool_)):
val = bool_false_check(val)
supported_types = (bool, np.bool_) + REAL_NUMBER_TYPES
if (isinstance(current_value, supported_types) or
is_text_string(current_value)):
try:
self.df.iloc[row, column] = current_value.__class__(val)
except (ValueError, OverflowError) as e:
QMessageBox.critical(self.dialog, "Error",
str(type(e).__name__) + ": " + str(e))
return False
else:
QMessageBox.critical(self.dialog, "Error",
"Editing dtype {0!s} not yet supported."
.format(type(current_value).__name__))
return False
self.max_min_col_update()
self.dataChanged.emit(index, index)
return True
def get_data(self):
"""Return data"""
return self.df
def rowCount(self, index=QModelIndex()):
"""DataFrame row number"""
# Avoid a "Qt exception in virtual methods" generated in our
# tests on Windows/Python 3.7
# See PR 8910
try:
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
except AttributeError:
return 0
def fetch_more(self, rows=False, columns=False):
"""Get more columns and/or rows."""
if rows and self.total_rows > self.rows_loaded:
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if columns and self.total_cols > self.cols_loaded:
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns()
def columnCount(self, index=QModelIndex()):
"""DataFrame column number"""
# Avoid a "Qt exception in virtual methods" generated in our
# tests on Windows/Python 3.7
# See PR 8910
try:
# This is done to implement series
if len(self.df.shape) == 1:
return 2
elif self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded
except AttributeError:
return 0
def reset(self):
self.beginResetModel()
self.endResetModel()
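# Illustrative sketch of the colouring rule used in DataFrameModel.get_bgcolor:
# a numeric cell is mapped linearly between the hue constants defined at the
# top of this module, so the column maximum gets BACKGROUND_NUMBER_MINHUE and
# the minimum gets BACKGROUND_NUMBER_MINHUE + BACKGROUND_NUMBER_HUERANGE.
def _demo_background_hue(value, vmax=10.0, vmin=0.0):
    hue = (BACKGROUND_NUMBER_MINHUE +
           BACKGROUND_NUMBER_HUERANGE * (vmax - float(value)) / (vmax - vmin))
    return min(abs(hue), 1.0)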
class DataFrameView(QTableView):
"""
Data Frame view class.
Signals
-------
    sig_sort_by_column(): Raised after sorting by a column.
    sig_fetch_more_columns(): Raised after more columns are fetched.
    sig_fetch_more_rows(): Raised after more rows are fetched.
"""
sig_sort_by_column = Signal()
sig_fetch_more_columns = Signal()
sig_fetch_more_rows = Signal()
def __init__(self, parent, model, header, hscroll, vscroll):
"""Constructor."""
QTableView.__init__(self, parent)
self.setModel(model)
self.setHorizontalScrollBar(hscroll)
self.setVerticalScrollBar(vscroll)
self.setHorizontalScrollMode(1)
self.setVerticalScrollMode(1)
self.sort_old = [None]
self.header_class = header
self.header_class.sectionClicked.connect(self.sortByColumn)
self.menu = self.setup_menu()
config_shortcut(self.copy, context='variable_explorer', name='copy',
parent=self)
self.horizontalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, columns=True))
self.verticalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, rows=True))
def load_more_data(self, value, rows=False, columns=False):
"""Load more rows and columns to display."""
try:
if rows and value == self.verticalScrollBar().maximum():
self.model().fetch_more(rows=rows)
self.sig_fetch_more_rows.emit()
if columns and value == self.horizontalScrollBar().maximum():
self.model().fetch_more(columns=columns)
self.sig_fetch_more_columns.emit()
except NameError:
# Needed to handle a NameError while fetching data when closing
# See issue 7880
pass
def sortByColumn(self, index):
"""Implement a column sort."""
if self.sort_old == [None]:
self.header_class.setSortIndicatorShown(True)
sort_order = self.header_class.sortIndicatorOrder()
self.sig_sort_by_column.emit()
if not self.model().sort(index, sort_order):
if len(self.sort_old) != 2:
self.header_class.setSortIndicatorShown(False)
else:
self.header_class.setSortIndicator(self.sort_old[0],
self.sort_old[1])
return
self.sort_old = [index, self.header_class.sortIndicatorOrder()]
def contextMenuEvent(self, event):
"""Reimplement Qt method."""
self.menu.popup(event.globalPos())
event.accept()
def setup_menu(self):
"""Setup context menu."""
copy_action = create_action(self, _('Copy'),
shortcut=keybinding('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
context=Qt.WidgetShortcut)
functions = ((_("To bool"), bool), (_("To complex"), complex),
(_("To int"), int), (_("To float"), float),
(_("To str"), to_text_string))
types_in_menu = [copy_action]
for name, func in functions:
slot = lambda func=func: self.change_type(func)
types_in_menu += [create_action(self, name,
triggered=slot,
context=Qt.WidgetShortcut)]
menu = QMenu(self)
add_actions(menu, types_in_menu)
return menu
def change_type(self, func):
"""A function that changes types of cells."""
model = self.model()
index_list = self.selectedIndexes()
[model.setData(i, '', change_type=func) for i in index_list]
@Slot()
def copy(self):
"""Copy text to clipboard"""
if not self.selectedIndexes():
return
(row_min, row_max,
col_min, col_max) = get_idx_rect(self.selectedIndexes())
index = header = False
df = self.model().df
obj = df.iloc[slice(row_min, row_max + 1),
slice(col_min, col_max + 1)]
output = io.StringIO()
obj.to_csv(output, sep='\t', index=index, header=header)
if not PY2:
contents = output.getvalue()
else:
contents = output.getvalue().decode('utf-8')
output.close()
clipboard = QApplication.clipboard()
clipboard.setText(contents)
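# Sketch of the serialisation performed by DataFrameView.copy(): the selected
# block is written as tab-separated text without index or header before being
# placed on the clipboard. The frame below is a stand-in for a real selection.
def _demo_selection_to_tsv():
    demo_frame = DataFrame({'a': [1, 2], 'b': [3.5, 4.5]})
    buffer = io.StringIO()
    demo_frame.iloc[0:2, 0:2].to_csv(buffer, sep='\t', index=False, header=False)
    return buffer.getvalue()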
class DataFrameHeaderModel(QAbstractTableModel):
"""
This class is the model for the header or index of the DataFrameEditor.
Taken from gtabview project (Header4ExtModel).
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/viewer.py
"""
COLUMN_INDEX = -1 # Makes reference to the index of the table.
def __init__(self, model, axis, palette):
"""
Header constructor.
The 'model' is the QAbstractTableModel of the dataframe, the 'axis' is
to acknowledge if is for the header (horizontal - 0) or for the
index (vertical - 1) and the palette is the set of colors to use.
"""
super(DataFrameHeaderModel, self).__init__()
self.model = model
self.axis = axis
self._palette = palette
if self.axis == 0:
self.total_cols = self.model.shape[1]
self._shape = (self.model.header_shape[0], self.model.shape[1])
if self.total_cols > LARGE_COLS:
self.cols_loaded = COLS_TO_LOAD
else:
self.cols_loaded = self.total_cols
else:
self.total_rows = self.model.shape[0]
self._shape = (self.model.shape[0], self.model.header_shape[1])
if self.total_rows > LARGE_NROWS:
self.rows_loaded = ROWS_TO_LOAD
else:
self.rows_loaded = self.total_rows
def rowCount(self, index=None):
"""Get number of rows in the header."""
if self.axis == 0:
return max(1, self._shape[0])
else:
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
def columnCount(self, index=QModelIndex()):
"""DataFrame column number"""
if self.axis == 0:
if self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded
else:
return max(1, self._shape[1])
def fetch_more(self, rows=False, columns=False):
"""Get more columns or rows (based on axis)."""
if self.axis == 1 and self.total_rows > self.rows_loaded:
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.axis == 0 and self.total_cols > self.cols_loaded:
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns()
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method."""
ascending = order == Qt.AscendingOrder
self.model.sort(self.COLUMN_INDEX, order=ascending)
return True
def headerData(self, section, orientation, role):
"""Get the information to put in the header."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not is_type_text_string(header):
header = to_text_string(header)
return header
def data(self, index, role):
"""
Get the data for the header.
This is used when a header has levels.
"""
if not index.isValid() or \
index.row() >= self._shape[0] or \
index.column() >= self._shape[1]:
return None
row, col = ((index.row(), index.column()) if self.axis == 0
else (index.column(), index.row()))
if role != Qt.DisplayRole:
return None
if self.axis == 0 and self._shape[0] <= 1:
return None
header = self.model.header(self.axis, col, row)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not is_type_text_string(header):
header = to_text_string(header)
return header
class DataFrameLevelModel(QAbstractTableModel):
"""
Data Frame level class.
This class is used to represent index levels in the DataFrameEditor. When
using MultiIndex, this model creates labels for the index/header as Index i
for each section in the index/header
Based on the gtabview project (Level4ExtModel).
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/viewer.py
"""
def __init__(self, model, palette, font):
super(DataFrameLevelModel, self).__init__()
self.model = model
self._background = palette.dark().color()
if self._background.lightness() > 127:
self._foreground = palette.text()
else:
self._foreground = palette.highlightedText()
self._palette = palette
font.setBold(True)
self._font = font
def rowCount(self, index=None):
"""Get number of rows (number of levels for the header)."""
return max(1, self.model.header_shape[0])
def columnCount(self, index=None):
"""Get the number of columns (number of levels for the index)."""
return max(1, self.model.header_shape[1])
def headerData(self, section, orientation, role):
"""
Get the text to put in the header of the levels of the indexes.
By default it returns 'Index i', where i is the section in the index
"""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.model.header_shape[0] <= 1 and orientation == Qt.Horizontal:
if self.model.name(1,section):
return self.model.name(1,section)
return _('Index')
elif self.model.header_shape[0] <= 1:
return None
elif self.model.header_shape[1] <= 1 and orientation == Qt.Vertical:
return None
return _('Index') + ' ' + to_text_string(section)
def data(self, index, role):
"""Get the information of the levels."""
if not index.isValid():
return None
if role == Qt.FontRole:
return self._font
label = ''
if index.column() == self.model.header_shape[1] - 1:
label = str(self.model.name(0, index.row()))
elif index.row() == self.model.header_shape[0] - 1:
label = str(self.model.name(1, index.column()))
if role == Qt.DisplayRole and label:
return label
elif role == Qt.ForegroundRole:
return self._foreground
elif role == Qt.BackgroundRole:
return self._background
# Note: this branch is unreachable because the identical
# Qt.BackgroundRole case above already returns.
elif role == Qt.BackgroundRole:
return self._palette.window()
return None
class DataFrameEditor(QDialog):
"""
Dialog for displaying and editing DataFrame and related objects.
Based on the gtabview project (ExtTableView).
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/viewer.py
Signals
-------
sig_option_changed(str, object): Raised if an option is changed.
Arguments are name of option and its new value.
"""
sig_option_changed = Signal(str, object)
def __init__(self, parent=None):
QDialog.__init__(self, parent)
# Destroying the C++ object right after closing the dialog box,
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.is_series = False
self.layout = None
def setup_and_check(self, data, title=''):
"""
Setup DataFrameEditor:
return False if data is not supported, True otherwise.
Supported types for data are DataFrame, Series and Index.
"""
self._selection_rec = False
self._model = None
self.layout = QGridLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.setWindowIcon(ima.icon('arredit'))
if title:
title = to_text_string(title) + " - %s" % data.__class__.__name__
else:
title = _("%s editor") % data.__class__.__name__
if isinstance(data, Series):
self.is_series = True
data = data.to_frame()
elif isinstance(data, Index):
data = DataFrame(data)
self.setWindowTitle(title)
self.resize(600, 500)
self.hscroll = QScrollBar(Qt.Horizontal)
self.vscroll = QScrollBar(Qt.Vertical)
# Create the view for the level
self.create_table_level()
# Create the view for the horizontal header
self.create_table_header()
# Create the view for the vertical index
self.create_table_index()
# Create the model and view of the data
self.dataModel = DataFrameModel(data, parent=self)
self.dataModel.dataChanged.connect(self.save_and_close_enable)
self.create_data_table()
self.layout.addWidget(self.hscroll, 2, 0, 1, 2)
self.layout.addWidget(self.vscroll, 0, 2, 2, 1)
# autosize columns on-demand
self._autosized_cols = set()
self._max_autosize_ms = None
self.dataTable.installEventFilter(self)
avg_width = self.fontMetrics().averageCharWidth()
self.min_trunc = avg_width * 12 # Minimum size for columns
self.max_width = avg_width * 64 # Maximum size for columns
self.setLayout(self.layout)
self.setMinimumSize(400, 300)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
btn_layout = QHBoxLayout()
btn = QPushButton(_("Format"))
# disable format button for int type
btn_layout.addWidget(btn)
btn.clicked.connect(self.change_format)
btn = QPushButton(_('Resize'))
btn_layout.addWidget(btn)
btn.clicked.connect(self.resize_to_contents)
bgcolor = QCheckBox(_('Background color'))
bgcolor.setChecked(self.dataModel.bgcolor_enabled)
bgcolor.setEnabled(self.dataModel.bgcolor_enabled)
bgcolor.stateChanged.connect(self.change_bgcolor_enable)
btn_layout.addWidget(bgcolor)
self.bgcolor_global = QCheckBox(_('Column min/max'))
self.bgcolor_global.setChecked(self.dataModel.colum_avg_enabled)
self.bgcolor_global.setEnabled(not self.is_series and
self.dataModel.bgcolor_enabled)
self.bgcolor_global.stateChanged.connect(self.dataModel.colum_avg)
btn_layout.addWidget(self.bgcolor_global)
btn_layout.addStretch()
self.btn_save_and_close = QPushButton(_('Save and Close'))
self.btn_save_and_close.setDisabled(True)
self.btn_save_and_close.clicked.connect(self.accept)
btn_layout.addWidget(self.btn_save_and_close)
self.btn_close = QPushButton(_('Close'))
self.btn_close.setAutoDefault(True)
self.btn_close.setDefault(True)
self.btn_close.clicked.connect(self.reject)
btn_layout.addWidget(self.btn_close)
btn_layout.setContentsMargins(4, 4, 4, 4)
self.layout.addLayout(btn_layout, 4, 0, 1, 2)
self.setModel(self.dataModel)
self.resizeColumnsToContents()
return True
@Slot(QModelIndex, QModelIndex)
def save_and_close_enable(self, top_left, bottom_right):
"""Handle the data change event to enable the save and close button."""
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True)
def create_table_level(self):
"""Create the QTableView that will hold the level model."""
self.table_level = QTableView()
self.table_level.setEditTriggers(QTableWidget.NoEditTriggers)
self.table_level.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_level.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_level.setFrameStyle(QFrame.Plain)
self.table_level.horizontalHeader().sectionResized.connect(
self._index_resized)
self.table_level.verticalHeader().sectionResized.connect(
self._header_resized)
self.table_level.setItemDelegate(QItemDelegate())
self.layout.addWidget(self.table_level, 0, 0)
self.table_level.setContentsMargins(0, 0, 0, 0)
self.table_level.horizontalHeader().sectionClicked.connect(
self.sortByIndex)
def create_table_header(self):
"""Create the QTableView that will hold the header model."""
self.table_header = QTableView()
self.table_header.verticalHeader().hide()
self.table_header.setEditTriggers(QTableWidget.NoEditTriggers)
self.table_header.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_header.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_header.setHorizontalScrollMode(QTableView.ScrollPerPixel)
self.table_header.setHorizontalScrollBar(self.hscroll)
self.table_header.setFrameStyle(QFrame.Plain)
self.table_header.horizontalHeader().sectionResized.connect(
self._column_resized)
self.table_header.setItemDelegate(QItemDelegate())
self.layout.addWidget(self.table_header, 0, 1)
def create_table_index(self):
"""Create the QTableView that will hold the index model."""
self.table_index = QTableView()
self.table_index.horizontalHeader().hide()
self.table_index.setEditTriggers(QTableWidget.NoEditTriggers)
self.table_index.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_index.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table_index.setVerticalScrollMode(QTableView.ScrollPerPixel)
self.table_index.setVerticalScrollBar(self.vscroll)
self.table_index.setFrameStyle(QFrame.Plain)
self.table_index.verticalHeader().sectionResized.connect(
self._row_resized)
self.table_index.setItemDelegate(QItemDelegate())
self.layout.addWidget(self.table_index, 1, 0)
self.table_index.setContentsMargins(0, 0, 0, 0)
def create_data_table(self):
"""Create the QTableView that will hold the data model."""
self.dataTable = DataFrameView(self, self.dataModel,
self.table_header.horizontalHeader(),
self.hscroll, self.vscroll)
self.dataTable.verticalHeader().hide()
self.dataTable.horizontalHeader().hide()
self.dataTable.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.dataTable.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.dataTable.setHorizontalScrollMode(QTableView.ScrollPerPixel)
self.dataTable.setVerticalScrollMode(QTableView.ScrollPerPixel)
self.dataTable.setFrameStyle(QFrame.Plain)
self.dataTable.setItemDelegate(QItemDelegate())
self.layout.addWidget(self.dataTable, 1, 1)
self.setFocusProxy(self.dataTable)
self.dataTable.sig_sort_by_column.connect(self._sort_update)
self.dataTable.sig_fetch_more_columns.connect(self._fetch_more_columns)
self.dataTable.sig_fetch_more_rows.connect(self._fetch_more_rows)
def sortByIndex(self, index):
"""Implement a Index sort."""
self.table_level.horizontalHeader().setSortIndicatorShown(True)
sort_order = self.table_level.horizontalHeader().sortIndicatorOrder()
self.table_index.model().sort(index, sort_order)
self._sort_update()
def model(self):
"""Get the model of the dataframe."""
return self._model
def _column_resized(self, col, old_width, new_width):
"""Update the column width."""
self.dataTable.setColumnWidth(col, new_width)
self._update_layout()
def _row_resized(self, row, old_height, new_height):
"""Update the row height."""
self.dataTable.setRowHeight(row, new_height)
self._update_layout()
def _index_resized(self, col, old_width, new_width):
"""Resize the corresponding column of the index section selected."""
self.table_index.setColumnWidth(col, new_width)
self._update_layout()
def _header_resized(self, row, old_height, new_height):
"""Resize the corresponding row of the header section selected."""
self.table_header.setRowHeight(row, new_height)
self._update_layout()
def _update_layout(self):
"""Set the width and height of the QTableViews and hide rows."""
h_width = max(self.table_level.verticalHeader().sizeHint().width(),
self.table_index.verticalHeader().sizeHint().width())
self.table_level.verticalHeader().setFixedWidth(h_width)
self.table_index.verticalHeader().setFixedWidth(h_width)
last_row = self._model.header_shape[0] - 1
if last_row < 0:
hdr_height = self.table_level.horizontalHeader().height()
else:
hdr_height = self.table_level.rowViewportPosition(last_row) + \
self.table_level.rowHeight(last_row) + \
self.table_level.horizontalHeader().height()
# Check if the header shape has only one row (which displays the
# same info as the horizontal header).
if last_row == 0:
self.table_level.setRowHidden(0, True)
self.table_header.setRowHidden(0, True)
self.table_header.setFixedHeight(hdr_height)
self.table_level.setFixedHeight(hdr_height)
last_col = self._model.header_shape[1] - 1
if last_col < 0:
idx_width = self.table_level.verticalHeader().width()
else:
idx_width = self.table_level.columnViewportPosition(last_col) + \
self.table_level.columnWidth(last_col) + \
self.table_level.verticalHeader().width()
self.table_index.setFixedWidth(idx_width)
self.table_level.setFixedWidth(idx_width)
self._resizeVisibleColumnsToContents()
def _reset_model(self, table, model):
"""Set the model in the given table."""
old_sel_model = table.selectionModel()
table.setModel(model)
if old_sel_model:
del old_sel_model
def setAutosizeLimit(self, limit_ms):
"""Set maximum size for columns."""
self._max_autosize_ms = limit_ms
def setModel(self, model, relayout=True):
"""Set the model for the data, header/index and level views."""
self._model = model
sel_model = self.dataTable.selectionModel()
sel_model.currentColumnChanged.connect(
self._resizeCurrentColumnToContents)
# Associate the models (level, vertical index and horizontal header)
# with their corresponding views.
self._reset_model(self.table_level, DataFrameLevelModel(model,
self.palette(),
self.font()))
self._reset_model(self.table_header, DataFrameHeaderModel(
model,
0,
self.palette()))
self._reset_model(self.table_index, DataFrameHeaderModel(
model,
1,
self.palette()))
# Needs to be called after setting all table models
if relayout:
self._update_layout()
def setCurrentIndex(self, y, x):
"""Set current selection."""
self.dataTable.selectionModel().setCurrentIndex(
self.dataTable.model().index(y, x),
QItemSelectionModel.ClearAndSelect)
def _sizeHintForColumn(self, table, col, limit_ms=None):
"""Get the size hint for a given column in a table."""
max_row = table.model().rowCount()
lm_start = time.perf_counter()  # time.clock() was removed in Python 3.8
lm_row = 64 if limit_ms else max_row
max_width = self.min_trunc
for row in range(max_row):
v = table.sizeHintForIndex(table.model().index(row, col))
max_width = max(max_width, v.width())
if row > lm_row:
lm_now = time.perf_counter()
lm_elapsed = (lm_now - lm_start) * 1000
if lm_elapsed >= limit_ms:
break
lm_row = int((row / lm_elapsed) * limit_ms)
return max_width
def _resizeColumnToContents(self, header, data, col, limit_ms):
"""Resize a column by its contents."""
hdr_width = self._sizeHintForColumn(header, col, limit_ms)
data_width = self._sizeHintForColumn(data, col, limit_ms)
if data_width > hdr_width:
width = min(self.max_width, data_width)
elif hdr_width > data_width * 2:
width = max(min(hdr_width, self.min_trunc), min(self.max_width,
data_width))
else:
width = max(min(self.max_width, hdr_width), self.min_trunc)
header.setColumnWidth(col, width)
def _resizeColumnsToContents(self, header, data, limit_ms):
"""Resize all the colummns to its contents."""
max_col = data.model().columnCount()
if limit_ms is None:
max_col_ms = None
else:
max_col_ms = limit_ms / max(1, max_col)
for col in range(max_col):
self._resizeColumnToContents(header, data, col, max_col_ms)
def eventFilter(self, obj, event):
"""Override eventFilter to catch resize event."""
if obj == self.dataTable and event.type() == QEvent.Resize:
self._resizeVisibleColumnsToContents()
return False
def _resizeVisibleColumnsToContents(self):
"""Resize the columns that are in the view."""
index_column = self.dataTable.rect().topLeft().x()
start = col = self.dataTable.columnAt(index_column)
width = self._model.shape[1]
end = self.dataTable.columnAt(self.dataTable.rect().bottomRight().x())
end = width if end == -1 else end + 1
if self._max_autosize_ms is None:
max_col_ms = None
else:
max_col_ms = self._max_autosize_ms / max(1, end - start)
while col < end:
resized = False
if col not in self._autosized_cols:
self._autosized_cols.add(col)
resized = True
self._resizeColumnToContents(self.table_header, self.dataTable,
col, max_col_ms)
col += 1
if resized:
# As we resize columns, the boundary will change
index_column = self.dataTable.rect().bottomRight().x()
end = self.dataTable.columnAt(index_column)
end = width if end == -1 else end + 1
if max_col_ms is not None:
max_col_ms = self._max_autosize_ms / max(1, end - start)
def _resizeCurrentColumnToContents(self, new_index, old_index):
"""Resize the current column to its contents."""
if new_index.column() not in self._autosized_cols:
# Ensure the requested column is fully into view after resizing
self._resizeVisibleColumnsToContents()
self.dataTable.scrollTo(new_index)
def resizeColumnsToContents(self):
"""Resize the columns to its contents."""
self._autosized_cols = set()
self._resizeColumnsToContents(self.table_level,
self.table_index, self._max_autosize_ms)
self._update_layout()
def change_bgcolor_enable(self, state):
"""
This is implementet so column min/max is only active when bgcolor is
"""
self.dataModel.bgcolor(state)
self.bgcolor_global.setEnabled(not self.is_series and state > 0)
def change_format(self):
"""
Ask user for display format for floats and use it.
This function also checks whether the format is valid and emits
`sig_option_changed`.
"""
format, valid = QInputDialog.getText(self, _('Format'),
_("Float formatting"),
QLineEdit.Normal,
self.dataModel.get_format())
if valid:
format = str(format)
try:
format % 1.1
except (TypeError, ValueError):
msg = _("Format ({}) is incorrect").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
if not format.startswith('%'):
msg = _("Format ({}) should start with '%'").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
self.dataModel.set_format(format)
self.sig_option_changed.emit('dataframe_format', format)
def get_value(self):
"""Return modified Dataframe -- this is *not* a copy"""
# It is important to avoid accessing the Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
df = self.dataModel.get_data()
if self.is_series:
return df.iloc[:, 0]
else:
return df
def _update_header_size(self):
"""Update the column width of the header."""
column_count = self.table_header.model().columnCount()
for index in range(0, column_count):
if index < column_count:
column_width = self.dataTable.columnWidth(index)
self.table_header.setColumnWidth(index, column_width)
else:
break
def _sort_update(self):
"""
Update the model for all the QTableView objects.
Uses the model of the dataTable as the base.
"""
self.setModel(self.dataTable.model())
def _fetch_more_columns(self):
"""Fetch more data for the header (columns)."""
self.table_header.model().fetch_more()
def _fetch_more_rows(self):
"""Fetch more data for the index (rows)."""
self.table_index.model().fetch_more()
def resize_to_contents(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.dataTable.resizeColumnsToContents()
self.dataModel.fetch_more(columns=True)
self.dataTable.resizeColumnsToContents()
self._update_header_size()
QApplication.restoreOverrideCursor()
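# Illustrative usage sketch (not part of the original module): the editor's
# sig_option_changed signal carries an option name and its new value, e.g.
# ('dataframe_format', '%.6g') after a successful "Format" change. The slot
# below and the commented wiring are assumptions for demonstration only.
def _print_option_changed(name, value):
    """Example slot for DataFrameEditor.sig_option_changed."""
    print("Option changed:", name, "->", value)

# Typical wiring, assuming a running QApplication:
#     dlg = DataFrameEditor()
#     dlg.sig_option_changed.connect(_print_option_changed)
#     if dlg.setup_and_check(DataFrame({'x': [1.0, 2.0]})):
#         dlg.exec_()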
#==============================================================================
# Tests
#==============================================================================
def test_edit(data, title="", parent=None):
"""Test subroutine"""
app = qapplication() # analysis:ignore
dlg = DataFrameEditor(parent=parent)
if dlg.setup_and_check(data, title=title):
dlg.exec_()
return dlg.get_value()
else:
import sys
sys.exit(1)
def test():
"""DataFrame editor test"""
from numpy import nan
from pandas.util.testing import assert_frame_equal, assert_series_equal
df1 = DataFrame([
[True, "bool"],
[1+1j, "complex"],
['test', "string"],
[1.11, "float"],
[1, "int"],
[np.random.rand(3, 3), "Unkown type"],
["Large value", 100],
["áéí", "unicode"]
],
index=['a', 'b', nan, nan, nan, 'c',
"Test global max", 'd'],
columns=[nan, 'Type'])
out = test_edit(df1)
assert_frame_equal(df1, out)
import os
import tempfile
import time
from typing import Any, Dict, List, Optional
import pandas as pd
from upgini import dataset
from upgini.http import ProviderTaskSummary, SearchTaskSummary, get_rest_client
from upgini.metadata import SYSTEM_RECORD_ID, ModelTaskType
class SearchTask:
summary: Optional[SearchTaskSummary]
def __init__(
self,
search_task_id: str,
dataset: Optional["dataset.Dataset"] = None,
return_scores: bool = False,
extract_features: bool = False,
accurate_model: bool = False,
initial_search_task_id: Optional[str] = None,
task_type: Optional[ModelTaskType] = None,
endpoint: Optional[str] = None,
api_key: Optional[str] = None,
):
self.search_task_id = search_task_id
self.initial_search_task_id = initial_search_task_id
self.dataset = dataset
self.return_scores = return_scores
self.extract_features = extract_features
self.accurate_model = accurate_model
self.task_type = task_type
self.summary = None
self.endpoint = endpoint
self.api_key = api_key
def poll_result(self, quiet: bool = False) -> "SearchTask":
completed_statuses = {"COMPLETED", "VALIDATION_COMPLETED"}
failed_statuses = {"FAILED", "VALIDATION_FAILED"}
submitted_statuses = {"SUBMITTED", "VALIDATION_SUBMITTED"}
if not quiet:
print(
f"Running {self.search_task_id} search request.\n"
"We'll email you once it's completed. Please wait a few minutes."
)
search_task_id = self.initial_search_task_id if self.initial_search_task_id is not None else self.search_task_id
try:
time.sleep(1)
self.summary = get_rest_client(self.endpoint, self.api_key).search_task_summary_v2(search_task_id)
while self.summary.status not in completed_statuses:
if not quiet:
print("\\", end="\r")
time.sleep(5)
self.summary = get_rest_client(self.endpoint, self.api_key).search_task_summary_v2(search_task_id)
if not quiet:
print("/", end="\r")
if self.summary.status in failed_statuses:
raise RuntimeError("Oh! Server did something wrong, please retry with new search request.")
if self.summary.status in submitted_statuses and len(self._get_provider_summaries(self.summary)) == 0:
raise RuntimeError(
"No datasets found to intersect with uploaded file using defined search keys. "
"Try with another set of keys or different time period."
)
time.sleep(5)
except KeyboardInterrupt:
print("Search interrupted. Stopping search request...")
get_rest_client(self.endpoint, self.api_key).stop_search_task_v2(search_task_id)
print("Search request stopped")
raise
print()
has_completed_provider_task = False
for provider_summary in self._get_provider_summaries(self.summary):
if provider_summary.status == "COMPLETED":
has_completed_provider_task = True
if not has_completed_provider_task:
raise RuntimeError(
"All search tasks in the request have failed: "
+ ",".join([self._error_message(x) for x in self._get_provider_summaries(self.summary)])
+ "."
)
return self
@staticmethod
def _get_provider_summaries(summary: SearchTaskSummary) -> List[ProviderTaskSummary]:
if summary.status in {
"VALIDATION_CREATED",
"VALIDATION_SUBMITTED",
"VALIDATION_COMPLETED",
"VALIDATION_FAILED",
}:
return summary.validation_important_providers
else:
return summary.initial_important_providers
@staticmethod
def _error_message(provider_summary: ProviderTaskSummary):
if provider_summary.error_message:
return provider_summary.error_message
else:
if provider_summary.status == "TIMED_OUT":
return "Search request timed out"
elif provider_summary.status == "EMPTY_INTERSECTION":
return "Datasets doesn't intersect with uploaded file"
else:
return "Internal error"
def validation(self, validation_dataset: "dataset.Dataset", extract_features: bool = False) -> "SearchTask":
return validation_dataset.validation(self.search_task_id, return_scores=True, extract_features=extract_features)
def _check_finished_initial_search(self) -> List[ProviderTaskSummary]:
if self.summary is None or len(self.summary.initial_important_providers) == 0:
raise RuntimeError("Initial search didn't start.")
return self.summary.initial_important_providers
def _check_finished_validation_search(self) -> List[ProviderTaskSummary]:
if self.summary is None or len(self.summary.validation_important_providers) == 0:
raise RuntimeError("Validation search didn't start.")
return self.summary.validation_important_providers
@staticmethod
def _has_metric(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> bool:
for provider_summary in provider_summaries:
for code in provider_summary.metrics.keys():
if code == metric_code:
return True
return False
@staticmethod
def _metric_by_provider(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> List[Dict[str, str]]:
metric_by_provider = []
for provider_summary in provider_summaries:
for code, value in provider_summary.metrics.items():
if code == metric_code:
metric_by_provider.append(
{
"provider_id": provider_summary.provider_id,
"value": value,
}
)
return metric_by_provider
@staticmethod
def _ads_search_task_id_by_provider_id(provider_summaries: List[ProviderTaskSummary], provider_id: str) -> str:
for provider_summary in provider_summaries:
if provider_summary.provider_id == provider_id:
return provider_summary.ads_search_task_id
raise RuntimeError(f"Provider {provider_id} not found.")
@staticmethod
def _search_task_id_by_provider_id(provider_summaries: List[ProviderTaskSummary], provider_id: str) -> str:
for provider_summary in provider_summaries:
if provider_summary.provider_id == provider_id:
return provider_summary.search_task_id
raise RuntimeError(f"Provider {provider_id} not found.")
@staticmethod
def _model_id_by_provider(provider_summaries: List[ProviderTaskSummary]) -> pd.DataFrame:
result = []
for provider_summary in provider_summaries:
result.append(
{
"provider_id": provider_summary.provider_id,
"model_id": provider_summary.ads_search_task_id,
}
)
return pd.DataFrame(result)
@staticmethod
def _max_by_metric(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> Dict[str, Any]:
max_provider = None
max_metric = None
for x in SearchTask._metric_by_provider(provider_summaries, metric_code):
current_metric = float(x["value"])
if max_metric is None or current_metric > max_metric:
max_provider = x["provider_id"]
max_metric = current_metric
if max_metric is None:
raise RuntimeError(f"There is no {metric_code} available for search task.")
else:
return {"provider_id": max_provider, "value": max_metric}
def initial_max_auc(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "AUC"):
return self._max_by_metric(provider_summaries, "AUC")
else:
return None
def initial_max_accuracy(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return self._max_by_metric(provider_summaries, "ACCURACY")
else:
return None
def initial_max_rmse(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "RMSE"):
return self._max_by_metric(provider_summaries, "RMSE")
else:
return None
def initial_max_uplift(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return self._max_by_metric(provider_summaries, "UPLIFT")
else:
return None
def initial_max_hit_rate(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "HIT_RATE"):
return self._max_by_metric(provider_summaries, "HIT_RATE")
else:
return None
def _initial_min_hit_rate(self) -> float:
provider_summaries = self._check_finished_initial_search()
min_hit_rate = None
for x in self._metric_by_provider(provider_summaries, "HIT_RATE"):
current_value = float(x["value"])
if min_hit_rate is None or current_value < min_hit_rate:
min_hit_rate = current_value
if min_hit_rate is None:
raise RuntimeError("There is no hit rate available for search task.")
else:
return min_hit_rate
def initial_gini(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "GINI"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "GINI")).rename(
columns={"value": "gini"}, inplace=False
)
else:
return None
def initial_auc(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "AUC"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "AUC")).rename(
columns={"value": "roc-auc"}, inplace=False
)
else:
return None
def initial_accuracy(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "ACCURACY")).rename(
columns={"value": "accuracy"}, inplace=False
)
else:
return None
def initial_rmse(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "RMSE"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "RMSE")).rename(
columns={"value": "rmse"}, inplace=False
)
else:
return None
def initial_uplift(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "UPLIFT")).rename(
columns={"value": "uplift"}, inplace=False
)
else:
return None
def initial_hit_rate(self) -> pd.DataFrame:
provider_summaries = self._check_finished_initial_search()
return pd.DataFrame(self._metric_by_provider(provider_summaries, "HIT_RATE")).rename(
columns={"value": "hit_rate"}, inplace=False
)
def initial_metadata(self) -> pd.DataFrame:
provider_summaries = self._check_finished_initial_search()
quality_df = None
auc_df = self.initial_auc()
gini_df = self.initial_gini()
accuracy_df = self.initial_accuracy()
rmse_df = self.initial_rmse()
if auc_df is not None:
quality_df = auc_df
elif gini_df is not None:
quality_df = gini_df
elif accuracy_df is not None:
quality_df = accuracy_df
elif rmse_df is not None:
quality_df = rmse_df
uplift_df = self.initial_uplift()
hit_rate_df = self.initial_hit_rate()
model_id_df = self._model_id_by_provider(provider_summaries)
result = pd.merge(model_id_df, hit_rate_df, on="provider_id")
if quality_df is not None:
result = pd.merge(result, quality_df, on="provider_id")
if uplift_df is not None:
result = pd.merge(result, uplift_df, on="provider_id")
return result
def get_initial_scores_by_provider_id(self, provider_id: str) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
scores_response = get_rest_client(self.endpoint, self.api_key).get_search_scores_v2(self.search_task_id)
ads_search_task_id = self._ads_search_task_id_by_provider_id(provider_summaries, provider_id)
scores_id = None
for score_block in scores_response["adsSearchTaskTrainedScoresDTO"]:
if score_block["adsSearchTaskId"] == ads_search_task_id:
if score_block["trainedModelScoresType"] == "INITIAL_ETALON_AND_ADS":
scores_id = score_block["adsSearchTaskScoresId"]
elif score_block["trainedModelScoresType"] == "INITIAL_ADS" and not scores_id:
scores_id = score_block["adsSearchTaskScoresId"]
if scores_id is None:
print(f"Provider {provider_id} task wasn't completed in initial search")
return None
gzip_file_content = get_rest_client(self.endpoint, self.api_key).get_search_scores_file_v2(scores_id)
with tempfile.TemporaryDirectory() as tmp_dir:
gzip_file_name = "{0}/scores.gzip".format(tmp_dir)
with open(gzip_file_name, "wb") as gzip_file:
gzip_file.write(gzip_file_content)
scores = pd.read_csv(gzip_file_name, compression="gzip", low_memory=False)
# TODO implement client hashing
# if self.initial_dataset.initial_to_hashed is not None:
# # Hardcode with etalon msisdn - use system_id
# scores = pd.merge(scores, self.initial_dataset.initial_to_hashed, \
# on=["etalon_msisdn", "phone_hashed"])
# scores["etalon_msisdn"] = scores[self.initial_dataset.metadata.phone_column]
# scores.drop(columns="phone_hashed", inplace=True)
# if self.initial_dataset.drop_phone_column:
# scores.drop(columns="etalon_" + self.initial_dataset.metadata.phone_column, inplace=True)
# if self.initial_dataset.drop_date_column:
# scores.drop(columns="etalon_" + self.initial_dataset.metadata.date_column, inplace=True)
return scores
def _download_features_file(self, features_id) -> pd.DataFrame:
time.sleep(1)
gzip_file_content = get_rest_client(self.endpoint, self.api_key).get_search_features_file_v2(features_id)
with tempfile.TemporaryDirectory() as tmp_dir:
gzip_file_name = "{0}/features.gzip".format(tmp_dir)
with open(gzip_file_name, "wb") as gzip_file:
gzip_file.write(gzip_file_content)
return pd.read_csv(gzip_file_name, compression="gzip", low_memory=False)
def get_initial_raw_features_by_provider_id(self, provider_id) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
time.sleep(1)
features_response = get_rest_client(self.endpoint, self.api_key).get_search_features_v2(self.search_task_id)
ads_search_task_id = self._ads_search_task_id_by_provider_id(provider_summaries, provider_id)
features_id = None
for feature_block in features_response["adsSearchTaskFeaturesDTO"]:
if feature_block["adsSearchTaskId"] == ads_search_task_id and feature_block["searchType"] == "INITIAL":
features_id = feature_block["adsSearchTaskFeaturesId"]
if features_id is None:
print(f"Provider {provider_id} task wasn't completed in initial search")
return None
return self._download_features_file(features_id)
def get_all_initial_raw_features(self) -> Optional[pd.DataFrame]:
self._check_finished_initial_search()
time.sleep(1)
features_response = get_rest_client(self.endpoint, self.api_key).get_search_features_v2(self.search_task_id)
result_df = None
for feature_block in features_response["adsSearchTaskFeaturesDTO"]:
if feature_block["searchType"] == "INITIAL":
features_id = feature_block["adsSearchTaskFeaturesId"]
features_df = self._download_features_file(features_id)
if result_df is None:
result_df = features_df
else:
result_df = pd.merge(result_df, features_df, how="outer", on=SYSTEM_RECORD_ID)
if result_df is not None:
for column in result_df.columns:
if column.startswith("etalon_"):
result_df.rename(columns={column: column[7:]}, inplace=True)
return result_df
def download_model_by_provider_id(self, provider_id: str, model_path: str) -> None:
provider_summaries = self._check_finished_initial_search()
models_response = get_rest_client(self.endpoint, self.api_key).get_search_models_v2(self.search_task_id)
ads_search_task_id = self._ads_search_task_id_by_provider_id(provider_summaries, provider_id)
model_id = None
for model_block in models_response["adsSearchTaskTrainedModelDTO"]:
if model_block["adsSearchTaskId"] == ads_search_task_id:
if model_block["trainedModelType"] == "ETALON_AND_ADS":
model_id = model_block["adsSearchTaskTrainedModelId"]
elif model_block["trainedModelType"] == "ADS" and model_id is None:
model_id = model_block["adsSearchTaskTrainedModelId"]
if model_id is None:
print(f"Provider's {provider_id} task wasn't completed in initial search")
return None
model_bytes = get_rest_client(self.endpoint, self.api_key).get_search_model_file_v2(model_id)
if model_path.startswith("/") and not os.path.exists(os.path.dirname(model_path)):
os.makedirs(os.path.dirname(model_path))
with open(model_path, "wb") as model_file:
model_file.write(model_bytes)
print(f"Model successfully saved to {model_path}")
def get_max_initial_eval_set_metrics(self) -> Optional[List[dict]]:
provider_summaries = self._check_finished_initial_search()
max_idx = None
max_hit_rate = None
for idx, summary in enumerate(provider_summaries):
if summary.eval_set_metrics is not None:
for eval in summary.eval_set_metrics:
if max_idx is None:
max_idx = idx
if max_hit_rate is None:
max_hit_rate = eval.hit_rate
elif eval.hit_rate > max_hit_rate:
max_hit_rate = eval.hit_rate
max_idx = idx
if max_idx is not None:
eval_set_metrics = provider_summaries[max_idx].eval_set_metrics
if eval_set_metrics is not None:
return [eval.dict(exclude_none=True) for eval in eval_set_metrics]
return None
def validation_max_auc(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "AUC"):
return self._max_by_metric(provider_summaries, "AUC")
else:
return None
def validation_max_accuracy(self) -> Optional[Dict[str, Any]]:
# Note: this and validation_max_rmse below check the *initial* search
# summaries rather than _check_finished_validation_search(); this looks
# like a copy-paste inconsistency and is kept as-is here.
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return self._max_by_metric(provider_summaries, "ACCURACY")
else:
return None
def validation_max_rmse(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "RMSE"):
return self._max_by_metric(provider_summaries, "RMSE")
else:
return None
def validation_max_uplift(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return self._max_by_metric(provider_summaries, "UPLIFT")
else:
return None
def validation_gini(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "GINI"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "GINI")).rename(
columns={"value": "gini"}, inplace=False
)
else:
return None
def validation_auc(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "AUC"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "AUC")).rename(
columns={"value": "roc-auc"}, inplace=False
)
else:
return None
def validation_accuracy(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "ACCURACY")).rename(
columns={"value": "accuracy"}, inplace=False
)
else:
return None
def validation_rmse(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "RMSE"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "RMSE")).rename(
columns={"value": "rmse"}, inplace=False
)
else:
return None
def validation_uplift(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "UPLIFT")).rename(
columns={"value": "uplift"}, inplace=False
)
else:
return None
def validation_hit_rate(self) -> pd.DataFrame:
provider_summaries = self._check_finished_validation_search()
return pd.DataFrame(self._metric_by_provider(provider_summaries, "HIT_RATE")).rename(
columns={"value": "hit_rate"}, inplace=False
)
def _validation_min_hit_rate(self) -> float:
provider_summaries = self._check_finished_validation_search()
min_hit_rate = None
for x in self._metric_by_provider(provider_summaries, "HIT_RATE"):
current_value = float(x["value"])
if min_hit_rate is None or current_value < min_hit_rate:
min_hit_rate = current_value
if min_hit_rate is None:
raise RuntimeError("There is no hit rate available for search task.")
else:
return min_hit_rate
def validation_metadata(self) -> pd.DataFrame:
provider_summaries = self._check_finished_validation_search()
quality_df = None
gini_df = self.validation_gini()
auc_df = self.validation_auc()
accuracy_df = self.validation_accuracy()
rmse_df = self.validation_rmse()
if auc_df is not None:
quality_df = auc_df
elif gini_df is not None:
quality_df = gini_df
elif accuracy_df is not None:
quality_df = accuracy_df
elif rmse_df is not None:
quality_df = rmse_df
uplift_df = self.validation_uplift()
hit_rate_df = self.validation_hit_rate()
model_id_df = self._model_id_by_provider(provider_summaries)
result = pd.merge(model_id_df, hit_rate_df, on="provider_id")
if quality_df is not None:
result = pd.merge(result, quality_df, on="provider_id")
if uplift_df is not None:
result = pd.merge(result, uplift_df, on="provider_id")
return result
def get_validation_scores_by_provider_id(self, provider_id: str) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_validation_search()
validation_task_id = self._search_task_id_by_provider_id(provider_summaries, provider_id)
scores_response = get_rest_client(self.endpoint, self.api_key).get_search_scores_v2(validation_task_id)
ads_search_task_id = self._ads_search_task_id_by_provider_id(provider_summaries, provider_id)
scores_id = None
for score_block in scores_response["adsSearchTaskTrainedScoresDTO"]:
if score_block["adsSearchTaskId"] == ads_search_task_id:
if score_block["trainedModelScoresType"] == "VALIDATION_ETALON_AND_ADS":
scores_id = score_block["adsSearchTaskScoresId"]
elif score_block["trainedModelScoresType"] == "VALIDATION_ADS" and not scores_id:
scores_id = score_block["adsSearchTaskScoresId"]
if scores_id is None:
print("Provider ", provider_id, " not found in validation search")
return None
gzip_file_content = get_rest_client(self.endpoint, self.api_key).get_search_scores_file_v2(scores_id)
with tempfile.TemporaryDirectory() as tmp_dir:
gzip_file_name = "{0}/scores.gzip".format(tmp_dir)
with open(gzip_file_name, "wb") as gzip_file:
gzip_file.write(gzip_file_content)
scores = pd.read_csv(gzip_file_name, compression="gzip", low_memory=False)
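# Illustrative sketch only (not part of the upgini client): a typical way to
# consume a finished search task. The task id below is a placeholder and the
# default endpoint/api_key resolution is assumed to be handled by get_rest_client.
if __name__ == "__main__":
    task = SearchTask("<search-task-id>")   # placeholder id
    task.poll_result(quiet=True)            # blocks until COMPLETED or raises
    print(task.initial_metadata())          # per-provider hit rate / quality metrics
    features = task.get_all_initial_raw_features()
    if features is not None:
        print(features.head())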
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from lxml import objectify
from lxml.builder import E
import xml.etree.ElementTree as ET
import pandas as pd
import qualysapi
import qualysapi.config as qcconf
import requests
import sys
import os
import csv
import logging
import dateutil.parser as dp
csv.field_size_limit(sys.maxsize)
class qualysWhisperAPI(object):
COUNT_WEBAPP = '/count/was/webapp'
COUNT_WASSCAN = '/count/was/wasscan'
DELETE_REPORT = '/delete/was/report/{report_id}'
GET_WEBAPP_DETAILS = '/get/was/webapp/{was_id}'
QPS_REST_3 = '/qps/rest/3.0'
REPORT_DETAILS = '/get/was/report/{report_id}'
REPORT_STATUS = '/status/was/report/{report_id}'
REPORT_CREATE = '/create/was/report'
REPORT_DOWNLOAD = '/download/was/report/{report_id}'
SCAN_DETAILS = '/get/was/wasscan/{scan_id}'
SCAN_DOWNLOAD = '/download/was/wasscan/{scan_id}'
SEARCH_REPORTS = '/search/was/report'
SEARCH_WEB_APPS = '/search/was/webapp'
SEARCH_WAS_SCAN = '/search/was/wasscan'
VERSION = '/qps/rest/portal/version'
def __init__(self, config=None):
self.logger = logging.getLogger('qualysWhisperAPI')
self.config = config
try:
self.qgc = qualysapi.connect(config, 'qualys_web')
self.logger.info('Connected to Qualys at {}'.format(self.qgc.server))
except Exception as e:
self.logger.error('Could not connect to Qualys: {}'.format(str(e)))
self.headers = {
#"content-type": "text/xml"}
"Accept" : "application/json",
"Content-Type": "application/json"}
self.config_parse = qcconf.QualysConnectConfig(config, 'qualys_web')
try:
self.template_id = self.config_parse.get_template_id()
except:
self.logger.error('Could not retrieve template ID')
####
#### GET SCANS TO PROCESS
####
def get_was_scan_count(self, status):
"""
Checks number of scans, used to control the api limits
"""
parameters = (
E.ServiceRequest(
E.filters(
E.Criteria({'field': 'status', 'operator': 'EQUALS'}, status))))
xml_output = self.qgc.request(self.COUNT_WASSCAN, parameters)
root = objectify.fromstring(xml_output.encode('utf-8'))
return root.count.text
def generate_scan_result_XML(self, limit=1000, offset=1, status='FINISHED'):
report_xml = E.ServiceRequest(
E.filters(
E.Criteria({'field': 'status', 'operator': 'EQUALS'}, status
),
),
E.preferences(
E.startFromOffset(str(offset)),
E.limitResults(str(limit))
),
)
return report_xml
def get_scan_info(self, limit=1000, offset=1, status='FINISHED'):
""" Returns XML of ALL WAS Scans"""
data = self.generate_scan_result_XML(limit=limit, offset=offset, status=status)
return self.qgc.request(self.SEARCH_WAS_SCAN, data)
def xml_parser(self, xml, dupfield=None):
all_records = []
root = ET.XML(xml)
for i, child in enumerate(root):
for subchild in child:
record = {}
dup_tracker = 0
for p in subchild:
record[p.tag] = p.text
for o in p:
if o.tag in record:
dup_tracker += 1
record[o.tag + '_%s' % dup_tracker] = o.text
else:
record[o.tag] = o.text
all_records.append(record)
return pd.DataFrame(all_records)
def get_all_scans(self, limit=1000, offset=1, status='FINISHED'):
qualys_api_limit = limit
dataframes = []
_records = []
try:
total = int(self.get_was_scan_count(status=status))
self.logger.info('Already have WAS scan count')
self.logger.info('Retrieving information for {} scans'.format(total))
for i in range(0, total):
if i % limit == 0:
if (total - i) < limit:
qualys_api_limit = total - i
self.logger.info('Making a request with a limit of {} at offset {}'.format((str(qualys_api_limit)), str(i + 1)))
scan_info = self.get_scan_info(limit=qualys_api_limit, offset=i + 1, status=status)
_records.append(scan_info)
self.logger.debug('Converting XML to DataFrame')
dataframes = [self.xml_parser(xml) for xml in _records]
except Exception as e:
self.logger.error("Couldn't process all scans: {}".format(e))
return pd.concat(dataframes, axis=0)
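# Illustrative sketch only: pulling every FINISHED WAS scan into one DataFrame.
# The config path is a placeholder; it should point to the qualysapi INI file
# whose 'qualys_web' section is read by qualysapi.connect()/QualysConnectConfig above.
if __name__ == "__main__":
    qw = qualysWhisperAPI(config="/path/to/qualys.ini")  # placeholder path
    all_scans = qw.get_all_scans(status="FINISHED")
    print(all_scans.shape)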
# data scraping stage ------------------------------------
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
from . import labeling as label
import pandas as pd
# from django.http import JsonResponse
import pandas as pd
import re
import string
import nltk
import Sastrawi
import matplotlib.pyplot as plt
import numpy as np
import Sastrawi
import seaborn as sns
import math
from nltk.tokenize import word_tokenize
from nltk.tokenize import WordPunctTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
from os import path
from PIL import Image
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
from pylab import rcParams
from bs4 import BeautifulSoup
from matplotlib import rc
from collections import Counter, defaultdict
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
class sentimenAnalysis:
search=""
jumlahData=0
def scrappingData(request, data, jmlDataScrapping):
search = data
jumlahData=int(jmlDataScrapping)
print(data)
nltk.download('punkt')
nltk.download('stopwords')
# %matplotlib inline
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
chrome_path= r"C:\Users\<NAME>\Documents\za\chromedriver_win32\chromedriver.exe"
driver=webdriver.Chrome(chrome_path)
driver.get('https://play.google.com/store/search?q=' +search+ '&c=apps'+ '&hl=in')
tes = driver.find_element_by_xpath("//*[@id='fcxH9b']/div[4]/c-wiz/div/div[2]/div/c-wiz/c-wiz[1]/c-wiz/div/div[2]/div[1]/c-wiz/div/div/div[1]/div/div/a")
tes.click()
time.sleep(5)
tes1 = driver.find_element_by_xpath("//*[@id='fcxH9b']/div[4]/c-wiz[2]/div/div[2]/div/div[1]/div/div/div[1]/div[6]/div/span/span")
tes1.click()
time.sleep(4)
count = 1
i = 1
while i < 5:
try:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
if((i % 5)== 0):
driver.execute_script('window.scrollTo(1, 2000);')
time.sleep(2)
tes2 = driver.find_element_by_xpath("//*[@id='fcxH9b']/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[2]/div[2]/div/span/span")
tes2.click()
print("scroll ke -" + str(count))
i += 1
count+=1
except:
print("skip scrol")
i += 1
count+=1
print('done scrolling')
a = 'test1'
b = 1
c = []
b = 1
d = 0
errorNumber = 0
driver.execute_script('window.scrollTo(1, 10);')
while a != 'test':
d = 2
try:
tes3 = driver.find_element_by_xpath("//*[@id='fcxH9b']/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[2]/div[1]/div["+str(b)+"]/div/div[2]/div[2]/span[1]/div/button")
tes3.click()
except NoSuchElementException:
d = 1
try:
tes4 = driver.find_element_by_xpath("/html/body/div[1]/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[2]/div/div["+str(b)+"]/div/div[2]/div[2]/span["+str(d)+"]")
# print(str(b) + tes4.text)
print("review ke - " +str(b))
c.append(tes4.text)
if(int(b) >= jumlahData):
a = 'test'
b += 1
errorNumber += 1
except:
print(jumlahData)
errorNumber += 1
if(int(errorNumber) >= jumlahData):
a = 'test'
b += 1
# end of data scraping stage ------------------------------------
print(len(c))
# remove comments
data = | pd.DataFrame({"ulasan": c}) | pandas.DataFrame |
# -*- coding:utf8 -*-
import json
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ocr.v20181119 import ocr_client, models
from screenshots import get_image_data
import pandas as pd
import datetime
import openpyxl
import json
import re
import base64
def tencentkey():
with open('apikey.json', 'r') as f:
data = json.load(f)
Secret_Id = data['tencentapi']['SecretId']
SECRET_KEY = data['tencentapi']['SecretKey']
cred = credential.Credential( Secret_Id , SECRET_KEY)
return cred
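# For reference: tencentkey() above expects an apikey.json shaped like the
# dictionary below (values are placeholders, keys taken from the code):
EXAMPLE_APIKEY_JSON = {
    "tencentapi": {
        "SecretId": "<your-secret-id>",    # placeholder
        "SecretKey": "<your-secret-key>",  # placeholder
    }
}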
def tencentocrbasic1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralBasicOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralBasicOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocrbasic0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralBasicOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralBasicOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_script1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralHandwritingOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralHandwritingOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_script0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralHandwritingOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralHandwritingOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_hp1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred , "ap-beijing", clientProfile)
req = models.GeneralAccurateOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralAccurateOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_hp0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred , "ap-beijing", clientProfile)
req = models.GeneralAccurateOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralAccurateOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_eng1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.EnglishOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.EnglishOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_eng0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.EnglishOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.EnglishOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
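# Usage sketch (illustrative): every wrapper above captures the current
# screenshot through get_image_data() and returns the recognized text, e.g.
#     text = tencentocrbasic0()   # keeps line breaks between detected lines
#     text = tencentocrbasic1()   # concatenates detected lines without breaks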
def tencent_table():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.RecognizeTableOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.RecognizeTableOCR(req)
result1 = json.loads(resp.to_json_string())
rowIndex = []
colIndex = []
content = []
for item in result1['TableDetections']:
for item2 in item['Cells']:
rowIndex.append(item2['RowTl'])
colIndex.append(item2['ColTl'])
content.append(item2['Text'])
rowIndex = pd.Series(rowIndex)
from T2GEORES import geometry as geomtr
import sqlite3
import os
import pandas as pd
import json
def checktable(table_name,c):
"""It verifies the existance of a table on the sqlite database
Parameters
----------
table_name : str
Table name
c : cursor
Connection to the database
Returns
-------
int
check: if table exists returns 1
Examples
--------
>>> checktable(table_name,c)
"""
query="SELECT COUNT(name) from sqlite_master WHERE type='table' AND name='%s'"%(table_name)
c.execute(query)
if c.fetchone()[0]==1:
check=1
else:
check=0
return check
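# A small illustrative helper (not part of the original module) showing the
# intended checktable() call pattern; db_path is whatever the input
# dictionary's 'db_path' points to, e.g. '../input/model.db' (assumed name).
def _checktable_demo(db_path):
    """Return True if the 'wells' table already exists in the database."""
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    exists = checktable('wells', c) == 1
    conn.close()
    return exists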
def db_creation(input_dictionary):
"""It creates a sqlite databa base
Parameters
----------
input_dictionary : dictionary
Dictionary containing the path and name of the database under the keyword 'db_path', usually in '../input/'
Returns
-------
database
name: database on the desired path
Note
----
The tables wells, survey, PT, mh, drawdown, cooling, wellfeedzone, t2wellblock, t2wellsource, layers, ELEME, CONNE, segment, t2PTout, t2FLOWSout and t2FLOWVectors are generated
Examples
--------
>>> db_creation(input_dictionary)
"""
db_path=input_dictionary['db_path']
if not os.path.isfile(db_path):
conn=sqlite3.connect(db_path)
c = conn.cursor()
# Create table - wells
if checktable('wells',c)==0:
c.execute('''CREATE TABLE wells
([well] TEXT PRIMARY KEY,
[type] TEXT,
[east] REAL,
[north] REAL,
[elevation] REAL,
[lnr_init] REAL,
[lnr_end] REAL,
[lnr_D] TEXT,
[ptube_init] REAL,
[ptube_end] REAL,
[ptube_D] TEXT,
[drilldate] datetime)''')
#Create table - survey
if checktable('survey',c)==0:
c.execute('''CREATE TABLE survey
([well] TEXT,
[MeasuredDepth] REAL,
[Delta_east] REAL,
[Delta_north] REAL)''')
#Create table - PT
if checktable('PT',c)==0:
c.execute('''CREATE TABLE PT
([well] TEXT,
[MeasuredDepth] REAL,
[Pressure] REAL,
[Temperature] REAL)''')
#Create table - mh
if checktable('mh',c)==0:
c.execute('''CREATE TABLE mh
([well] TEXT,
[type] TEXT,
[date_time] datetime,
[steam_flow] REAL,
[liquid_flow] REAL,
[flowing_enthalpy] REAL,
[well_head_pressure] REAL)''')
#Create table - drawdown
if checktable('drawdown',c)==0:
c.execute('''CREATE TABLE drawdown
([well] TEXT,
[date_time] datetime,
[TVD] REAL,
[pressure] REAL)''')
#Create table - cooling
if checktable('cooling',c)==0:
c.execute('''CREATE TABLE cooling
([well] TEXT,
[date_time] datetime,
[TVD] REAL,
[temp] REAL)''')
#Create table - wellfeedzone
if checktable('wellfeedzone',c)==0:
c.execute('''CREATE TABLE wellfeedzone
([well] TEXT,
[MeasuredDepth] REAL,
[contribution] REAL)''')
#Create table - TOUGH2 well block(correlative)
if checktable('t2wellblock',c)==0:
c.execute('''CREATE TABLE t2wellblock
([well] TEXT PRIMARY KEY,
[blockcorr] TEXT)''')
#Create table - TOUGH2 well source
if checktable('t2wellsource',c)==0:
c.execute('''CREATE TABLE t2wellsource
([well] TEXT,
[blockcorr] TEXT ,
[source_nickname] TEXT PRIMARY KEY)''')
#Create table - layers levels
if checktable('layers',c)==0:
c.execute('''CREATE TABLE layers
([correlative] TEXT PRIMARY KEY,
[top] REAL,
[middle] REAL,
[bottom] REAL)''')
#Create table - stores ELEME section of mesh
if checktable('ELEME',c)==0:
c.execute('''CREATE TABLE ELEME
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME] TEXT,
[NSEQ] REAL,
[NADD] REAL,
[MA1] REAL,
[MA2] REAL,
[VOLX] REAL,
[AHTX] REAL,
[PMX] REAL,
[X] REAL,
[Y] REAL,
[Z] REAL,
[LAYER_N] REAL,
[h] REAL)''')
#Create table - stores CONNE section of mesh
if checktable('CONNE',c)==0:
c.execute('''CREATE TABLE CONNE
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME1] TEXT,
[ELEME2] TEXT,
[NSEQ] REAL,
[NAD1] REAL,
[NAD2] REAL,
[ISOT] REAL,
[D1] REAL,
[D2] REAL,
[AREAX] REAL,
[BETAX] REAL,
[SIGX] REAL)''')
#Create table - stores segment
if checktable('segment',c)==0:
c.execute('''CREATE TABLE segment
([model_version] REAL,
[model_output_timestamp] timestamp,
[x1] REAL,
[y1] REAL,
[x2] REAL,
[y2] REAL,
[redundant] REAL,
[ELEME1] TEXT,
[ELEME2] TEXT)''')
#Create table - PT out
if checktable('t2PTout',c)==0:
c.execute('''CREATE TABLE t2PTout
([blockcorr] TEXT PRIMARY KEY,
[x] REAL,
[y] REAL,
[z] REAL,
[index] REAL,
[P] REAL,
[T] REAL,
[SG] REAL,
[SW] REAL,
[X1] REAL,
[X2] REAL,
[PCAP] REAL,
[DG] REAL,
[DW] REAL)''')
#Create table - stores flows TOUGH2 output section
if checktable('t2FLOWSout',c)==0:
c.execute('''CREATE TABLE t2FLOWSout
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME1] TEXT,
[ELEME2] TEXT,
[INDEX] INT,
[FHEAT] REAL,
[FLOH] REAL,
[FLOF] REAL,
[FLOG] REAL,
[FLOAQ] REAL,
[FLOWTR2] REAL,
[VELG] REAL,
[VELAQ] REAL,
[TURB_COEFF] REAL,
[model_time] REAL)''')
#Create table - stores flows directions from every block
if checktable('t2FLOWVectors',c)==0:
c.execute('''CREATE TABLE t2FLOWVectors
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME] TEXT,
[FHEAT_x] REAL,
[FHEAT_y] REAL,
[FHEAT_z] REAL,
[FLOH_x] REAL,
[FLOH_y] REAL,
[FLOH_z] REAL,
[FLOF_x] REAL,
[FLOF_y] REAL,
[FLOF_z] REAL,
[FLOG_x] REAL,
[FLOG_y] REAL,
[FLOG_z] REAL,
[FLOAQ_x] REAL,
[FLOAQ_y] REAL,
[FLOAQ_z] REAL,
[FLOWTR2_x] REAL,
[FLOWTR2_y] REAL,
[FLOWTR2_z] REAL,
[VELG_x] REAL,
[VELG_y] REAL,
[VELG_z] REAL,
[VELAQ_x] REAL,
[VELAQ_y] REAL,
[VELAQ_z] REAL,
[TURB_COEFF_x] REAL,
[TURB_COEFF_y] REAL,
[TURB_COEFF_z] REAL,
[model_time] REAL)''')
conn.commit()
conn.close()
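# Example (sketch): the functions in this module expect an input_dictionary with at
# least the keys 'db_path' and 'source_txt'. The paths below are hypothetical
# placeholders for illustration, not values taken from the original project.
_example_input_dictionary = {
'db_path': '../input/model.db',  # sqlite database file (assumed location)
'source_txt': '../input/'        # folder holding ubication.csv, survey/, PT/, mh/, etc. (assumed)
}
# >>> db_creation(_example_input_dictionary)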
def insert_wells_sqlite(input_dictionary):
"""It stores the data contain on the ubication.csv file and stores it on the database
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The well name is written as the primary key. Thus, if the coordinates in ubication.csv change, it is better to
delete the records and rerun this function. Some print output is expected. A sketch of the expected ubication.csv columns is given after this function.
Examples
--------
>>> insert_wells_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c = conn.cursor()
wells=pd.read_csv(source_txt+'ubication.csv')
wells['drilldate'] = pd.to_datetime(wells['drilldate'],format="%Y%m%d")
for index,row in wells.iterrows():
try:
q="INSERT INTO wells(well,type,east,north,elevation,drilldate) VALUES ('%s','%s',%s,%s,%s,'%s')"%\
(row['well'],row['type'],row['east'],row['north'],row['masl'],row['drilldate'])
c.execute(q)
conn.commit()
except sqlite3.IntegrityError:
print("The well %s is already on the database")
conn.close()
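# Example (sketch): insert_wells_sqlite expects ubication.csv to provide at least the
# columns used above: well, type, east, north, masl and drilldate (YYYYMMDD).
# The single row below is invented for illustration only.
_example_wells = pd.DataFrame({
'well': ['WELL-1'],
'type': ['producer'],
'east': [415000.0],
'north': [280000.0],
'masl': [650.0],
'drilldate': ['19950101']
})
# _example_wells.to_csv('../input/ubication.csv', index=False)  # hypothetical path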
def insert_feedzone_to_sqlite(input_dictionary):
"""It stores the data contain on the ubication.csv file and stores it on the database
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Examples
--------
>>> insert_feedzone_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
feedzones=pd.read_csv(source_txt+'well_feedzone.csv',delimiter=',')
for index,row in feedzones.iterrows():
q="INSERT INTO wellfeedzone(well,MeasuredDepth,contribution) VALUES ('%s',%s,%s)"%\
(row['well'],row['MD'],row['contribution'])
c.execute(q)
conn.commit()
conn.close()
def insert_survey_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder survey from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The survey file for every well must have the following headers: MeasuredDepth, Delta_north, Delta_east
Examples
--------
>>> insert_survey_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'survey/'):
if os.path.isfile(os.path.join(source_txt, 'survey/',f)):
well_name=f.replace("'","").replace("_MD.dat","")
well_file=os.path.join(source_txt, 'survey/',f)
survey=pd.read_csv(well_file)
for index, row in survey.iterrows():
q="INSERT INTO survey(well,MeasuredDepth,Delta_north,Delta_east) VALUES ('%s',%s,%s,%s)"%\
(well_name,row['MeasuredDepth'],row['Delta_north'],row['Delta_east'])
c.execute(q)
conn.commit()
conn.close()
def insert_PT_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder PT from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The PT file for every well must have the following headers: MD, P, T. The file name must be well_MDPT.dat. A sketch of such a file is given after this function.
Examples
--------
>>> insert_PT_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'PT'):
if os.path.isfile(source_txt+'PT/'+f):
if '_MDPT' in f:
well_name=f.replace("'","").replace("_MDPT.dat","")
if os.path.isfile(source_txt+'PT/'+f):
PT=pd.read_csv(source_txt+'PT/'+f)
for index, row in PT.iterrows():
q="INSERT INTO PT(well,MeasuredDepth,Pressure,Temperature) VALUES ('%s',%s,%s,%s)"%\
(well_name,row['MD'],row['P'],row['T'])
c.execute(q)
conn.commit()
conn.close()
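# Example (sketch): a minimal downhole profile file for insert_PT_to_sqlite, named
# WELL-1_MDPT.dat and placed under source_txt/PT/. Values are invented placeholders.
_example_PT = pd.DataFrame({
'MD': [0.0, 500.0, 1000.0],   # measured depth (placeholder values)
'P': [1.0, 40.0, 85.0],       # pressure (placeholder values)
'T': [30.0, 150.0, 240.0]     # temperature (placeholder values)
})
# _example_PT.to_csv('../input/PT/WELL-1_MDPT.dat', index=False)  # hypothetical path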
def insert_drawdown_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder drawdown from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The drawdown record for every well must have the following headers: datetime, TVD, pressure. The file name must be well_DD.dat
Examples
--------
>>> insert_drawdown_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'drawdown'):
well_name=f.replace("'","").replace("_DD.dat","")
if os.path.isfile(source_txt+'drawdown/'+f) and f!='p_res.csv':
C=pd.read_csv(source_txt+'drawdown/'+f)
for index, row in C.iterrows():
q="INSERT INTO drawdown(well,date_time,TVD,pressure) VALUES ('%s','%s',%s,%s)"%\
(well_name,row['datetime'],row['TVD'],row['pressure'])
c.execute(q)
conn.commit()
conn.close()
def insert_cooling_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder cooling from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The cooling record for every well must have the following headers: datetime, TVD, temperature. The file name must be well_C.dat
Examples
--------
>>> insert_cooling_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'cooling'):
well_name=f.replace("'","").replace("_C.dat","")
if os.path.isfile(source_txt+'cooling/'+f):
DD=pd.read_csv(source_txt+'cooling/'+f)
for index, row in DD.iterrows():
q="INSERT INTO cooling(well,date_time,TVD,temp) VALUES ('%s','%s',%s,%s)"%\
(well_name,row['datetime'],row['TVD'],row['temperature'])
c.execute(q)
conn.commit()
conn.close()
def insert_mh_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder mh from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
Every file contains information about the flow rate and flowing enthalpy of the wells. Every record must contain the following headers:
type, date-time, steam, liquid, enthalpy, WHPabs. The file name must be well_mh.dat. A sketch of such a file is given after this function.
Examples
--------
>>> insert_mh_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'mh'):
well_name=f.replace("'","").replace("_mh.dat","")
if os.path.isfile(source_txt+'mh/'+f):
mh=pd.read_csv(source_txt+'mh/'+f)
for index, row in mh.iterrows():
q="INSERT INTO mh(well,type,date_time,steam_flow,liquid_flow,flowing_enthalpy,well_head_pressure) VALUES ('%s','%s','%s',%s,%s,%s,%s)"%\
(well_name,row['type'],row['date-time'],row['steam'],row['liquid'],row['enthalpy'],row['WHPabs'])
c.execute(q)
conn.commit()
conn.close()
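# Example (sketch): a minimal flow/enthalpy file for insert_mh_to_sqlite, named
# WELL-1_mh.dat and placed under source_txt/mh/. Values are invented placeholders.
_example_mh = pd.DataFrame({
'type': ['P'],
'date-time': ['1995-01-01 00:00:00'],
'steam': [10.0],
'liquid': [50.0],
'enthalpy': [1200.0],
'WHPabs': [12.0]
})
# _example_mh.to_csv('../input/mh/WELL-1_mh.dat', index=False)  # hypothetical path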
def replace_mh(wells_to_replace,input_dictionary):
"""It stores all the data contain on the subfolder mh from the input file folder from some selected wells
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
wells_to_replace: list
List of the wells whose flow data will be replaced
Note
----
Every file contains information about the flow rate and flowing enthalpy of the wells. Every record must contain the following headers:
type, date-time, steam, liquid, enthalpy, WHPabs. The file name must be well_mh.dat
Examples
--------
>>> replace_mh(['WELL-1','WELL-2'], input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'mh'):
well_name=f.replace("'","").replace("_mh.dat","")
if well_name in wells_to_replace:
q="DELETE FROM mh WHERE well='%s'"%well_name
c.execute(q)
conn.commit()
if os.path.isfile(source_txt+'mh/'+f):
print(source_txt+'mh/'+f)
mh= | pd.read_csv(source_txt+'mh/'+f) | pandas.read_csv |
import pandas as pd
import re
import matplotlib.pyplot as plt
import os
def parse(file_name):
# Read the CSV if it exists
try:
df = pd.read_csv('data.csv',index_col=None)
# Otherwise create an empty data frame
except FileNotFoundError:
df = | pd.DataFrame(index=None) | pandas.DataFrame |
"""
Train final models for BGC activity using gradient boosting machines and all
features on the complete tungsten set.
"""
import os
import pandas as pd
from joblib import dump
from sklearn.svm import SVC
from sklearn.impute import SimpleImputer
# set directory
git_dir = os.path.expanduser("~/git/prism-4-paper")
os.chdir(git_dir)
# read platinum and the associated activity matrix
paths = pd.read_table(git_dir + "/data/platinum/raw_paths.txt")
act = pd.read_csv(git_dir + "/data/platinum/activity_matrix.csv")
## drop unnecessary activities
act = act.loc[:, ['id', 'Bacteria', 'Fungi', 'Prokaryote', 'Virus', 'Cancer',
'Immunomodulator']]
plat = pd.merge(paths, act, how='left', on='id')
## set cluster
clusters = [os.path.basename(file) for file in plat['fasta']]
plat = plat.assign(cluster=clusters)
# read fingerprints
fps = | pd.read_csv(git_dir + "/data/platinum/PRISM_fingerprints_mean.csv.gz") | pandas.read_csv |
import pandas as pd
import numpy as np
import folium
from folium.plugins import MarkerCluster
from PyQt5.QtWidgets import QMessageBox, QTableWidget, QTableWidgetItem, QProgressDialog
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_lcd_display(object):
def setupUi(self, lcd_display):
lcd_display.setObjectName("lcd_display")
lcd_display.setEnabled(True)
lcd_display.resize(2672, 1969)
font = QtGui.QFont()
font.setPointSize(8)
lcd_display.setFont(font)
self.centralwidget = QtWidgets.QWidget(lcd_display)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_28 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label_28.setFont(font)
self.label_28.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_28.setAutoFillBackground(True)
self.label_28.setObjectName("label_28")
self.verticalLayout_3.addWidget(self.label_28)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem, 5, 9, 1, 1)
self.QComboBox_Longetude_val = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.QComboBox_Longetude_val.sizePolicy().hasHeightForWidth())
self.QComboBox_Longetude_val.setSizePolicy(sizePolicy)
self.QComboBox_Longetude_val.setObjectName("QComboBox_Longetude_val")
self.gridLayout_4.addWidget(self.QComboBox_Longetude_val, 6, 6, 1, 4)
self.QComboBox_Timstamp = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.QComboBox_Timstamp.sizePolicy().hasHeightForWidth())
self.QComboBox_Timstamp.setSizePolicy(sizePolicy)
self.QComboBox_Timstamp.setObjectName("QComboBox_Timstamp")
self.gridLayout_4.addWidget(self.QComboBox_Timstamp, 6, 11, 1, 2)
self.label_20 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_20.setFont(font)
self.label_20.setObjectName("label_20")
self.gridLayout_4.addWidget(self.label_20, 0, 1, 1, 5)
self.bt_sort = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.bt_sort.sizePolicy().hasHeightForWidth())
self.bt_sort.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
self.bt_sort.setFont(font)
self.bt_sort.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_sort.setObjectName("bt_sort")
self.gridLayout_4.addWidget(self.bt_sort, 7, 11, 1, 2)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem1, 7, 13, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_4.addItem(spacerItem2, 3, 1, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem3, 6, 13, 1, 1)
self.QComboBox_Latetude_val = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.QComboBox_Latetude_val.sizePolicy().hasHeightForWidth())
self.QComboBox_Latetude_val.setSizePolicy(sizePolicy)
self.QComboBox_Latetude_val.setCurrentText("")
self.QComboBox_Latetude_val.setObjectName("QComboBox_Latetude_val")
self.gridLayout_4.addWidget(self.QComboBox_Latetude_val, 6, 1, 1, 4)
self.text_filepath = QtWidgets.QLineEdit(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.text_filepath.setFont(font)
self.text_filepath.setToolTip("")
self.text_filepath.setObjectName("text_filepath")
self.gridLayout_4.addWidget(self.text_filepath, 1, 1, 1, 9)
self.label_25 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_25.setFont(font)
self.label_25.setObjectName("label_25")
self.gridLayout_4.addWidget(self.label_25, 4, 1, 1, 5)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem4, 5, 4, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem5, 5, 7, 1, 1)
self.label_32 = QtWidgets.QLabel(self.centralwidget)
self.label_32.setObjectName("label_32")
self.gridLayout_4.addWidget(self.label_32, 2, 1, 1, 5)
self.label_18 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_18.sizePolicy().hasHeightForWidth())
self.label_18.setSizePolicy(sizePolicy)
self.label_18.setObjectName("label_18")
self.gridLayout_4.addWidget(self.label_18, 5, 11, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem6, 6, 5, 1, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout_4.addWidget(self.label, 5, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_16.sizePolicy().hasHeightForWidth())
self.label_16.setSizePolicy(sizePolicy)
self.label_16.setObjectName("label_16")
self.gridLayout_4.addWidget(self.label_16, 5, 6, 1, 1)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem7, 5, 3, 1, 1)
self.label_29 = QtWidgets.QLabel(self.centralwidget)
self.label_29.setObjectName("label_29")
self.gridLayout_4.addWidget(self.label_29, 5, 12, 1, 1)
self.bt_fileread = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(3)
sizePolicy.setHeightForWidth(self.bt_fileread.sizePolicy().hasHeightForWidth())
self.bt_fileread.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(75)
self.bt_fileread.setFont(font)
self.bt_fileread.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_fileread.setToolTip("")
self.bt_fileread.setStatusTip("")
self.bt_fileread.setWhatsThis("")
self.bt_fileread.setObjectName("bt_fileread")
self.gridLayout_4.addWidget(self.bt_fileread, 1, 11, 1, 3)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem8, 6, 10, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_4)
spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem9)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_19 = QtWidgets.QLabel(self.centralwidget)
self.label_19.setObjectName("label_19")
self.gridLayout_3.addWidget(self.label_19, 6, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setObjectName("label_9")
self.gridLayout_3.addWidget(self.label_9, 4, 0, 1, 1)
self.checkbox_filter_t1 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_t1.setObjectName("checkbox_filter_t1")
self.gridLayout_3.addWidget(self.checkbox_filter_t1, 9, 6, 1, 1)
self.checkbox_filter_2 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_2.setObjectName("checkbox_filter_2")
self.gridLayout_3.addWidget(self.checkbox_filter_2, 3, 6, 1, 1)
self.text_filter_par1 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par1.sizePolicy().hasHeightForWidth())
self.text_filter_par1.setSizePolicy(sizePolicy)
self.text_filter_par1.setText("")
self.text_filter_par1.setObjectName("text_filter_par1")
self.gridLayout_3.addWidget(self.text_filter_par1, 2, 3, 1, 2)
self.checkbox_filter_1 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_1.setObjectName("checkbox_filter_1")
self.gridLayout_3.addWidget(self.checkbox_filter_1, 2, 6, 1, 1)
self.comboBox_filter_op2 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op2.setObjectName("comboBox_filter_op2")
self.comboBox_filter_op2.addItem("")
self.comboBox_filter_op2.addItem("")
self.comboBox_filter_op2.addItem("")
self.comboBox_filter_op2.addItem("")
self.comboBox_filter_op2.addItem("")
self.comboBox_filter_op2.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op2, 3, 2, 1, 1)
self.comboBox_filter_op1 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_op1.sizePolicy().hasHeightForWidth())
self.comboBox_filter_op1.setSizePolicy(sizePolicy)
self.comboBox_filter_op1.setObjectName("comboBox_filter_op1")
self.comboBox_filter_op1.addItem("")
self.comboBox_filter_op1.addItem("")
self.comboBox_filter_op1.addItem("")
self.comboBox_filter_op1.addItem("")
self.comboBox_filter_op1.addItem("")
self.comboBox_filter_op1.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op1, 2, 2, 1, 1)
self.comboBox_filter_par2 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par2.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par2.setSizePolicy(sizePolicy)
self.comboBox_filter_par2.setObjectName("comboBox_filter_par2")
self.gridLayout_3.addWidget(self.comboBox_filter_par2, 3, 1, 1, 1)
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setObjectName("label_10")
self.gridLayout_3.addWidget(self.label_10, 1, 1, 1, 1)
self.comboBox_filter_par5 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par5.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par5.setSizePolicy(sizePolicy)
self.comboBox_filter_par5.setObjectName("comboBox_filter_par5")
self.gridLayout_3.addWidget(self.comboBox_filter_par5, 6, 1, 1, 1)
self.comboBox_filter_op4 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op4.setObjectName("comboBox_filter_op4")
self.comboBox_filter_op4.addItem("")
self.comboBox_filter_op4.addItem("")
self.comboBox_filter_op4.addItem("")
self.comboBox_filter_op4.addItem("")
self.comboBox_filter_op4.addItem("")
self.comboBox_filter_op4.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op4, 5, 2, 1, 1)
self.comboBox_filter_op6 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op6.setObjectName("comboBox_filter_op6")
self.comboBox_filter_op6.addItem("")
self.comboBox_filter_op6.addItem("")
self.comboBox_filter_op6.addItem("")
self.comboBox_filter_op6.addItem("")
self.comboBox_filter_op6.addItem("")
self.comboBox_filter_op6.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op6, 7, 2, 1, 1)
self.text_filter_par2 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par2.sizePolicy().hasHeightForWidth())
self.text_filter_par2.setSizePolicy(sizePolicy)
self.text_filter_par2.setText("")
self.text_filter_par2.setObjectName("text_filter_par2")
self.gridLayout_3.addWidget(self.text_filter_par2, 3, 3, 1, 2)
self.checkbox_filter_4 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_4.setObjectName("checkbox_filter_4")
self.gridLayout_3.addWidget(self.checkbox_filter_4, 5, 6, 1, 1)
self.comboBox_filter_par4 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par4.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par4.setSizePolicy(sizePolicy)
self.comboBox_filter_par4.setObjectName("comboBox_filter_par4")
self.gridLayout_3.addWidget(self.comboBox_filter_par4, 5, 1, 1, 1)
self.comboBox_filter_op_t2 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op_t2.setObjectName("comboBox_filter_op_t2")
self.comboBox_filter_op_t2.addItem("")
self.comboBox_filter_op_t2.addItem("")
self.comboBox_filter_op_t2.addItem("")
self.comboBox_filter_op_t2.addItem("")
self.comboBox_filter_op_t2.addItem("")
self.comboBox_filter_op_t2.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op_t2, 10, 2, 1, 1)
self.comboBox_filter_op5 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op5.setObjectName("comboBox_filter_op5")
self.comboBox_filter_op5.addItem("")
self.comboBox_filter_op5.addItem("")
self.comboBox_filter_op5.addItem("")
self.comboBox_filter_op5.addItem("")
self.comboBox_filter_op5.addItem("")
self.comboBox_filter_op5.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op5, 6, 2, 1, 1)
self.comboBox_filter_time2 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_time2.sizePolicy().hasHeightForWidth())
self.comboBox_filter_time2.setSizePolicy(sizePolicy)
self.comboBox_filter_time2.setObjectName("comboBox_filter_time2")
self.gridLayout_3.addWidget(self.comboBox_filter_time2, 10, 1, 1, 1)
self.checkbox_filter_6 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_6.setObjectName("checkbox_filter_6")
self.gridLayout_3.addWidget(self.checkbox_filter_6, 7, 6, 1, 1)
self.comboBox_filter_par3 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par3.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par3.setSizePolicy(sizePolicy)
self.comboBox_filter_par3.setObjectName("comboBox_filter_par3")
self.gridLayout_3.addWidget(self.comboBox_filter_par3, 4, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setObjectName("label_8")
self.gridLayout_3.addWidget(self.label_8, 3, 0, 1, 1)
self.comboBox_filter_op_t1 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op_t1.setObjectName("comboBox_filter_op_t1")
self.comboBox_filter_op_t1.addItem("")
self.comboBox_filter_op_t1.addItem("")
self.comboBox_filter_op_t1.addItem("")
self.comboBox_filter_op_t1.addItem("")
self.comboBox_filter_op_t1.addItem("")
self.comboBox_filter_op_t1.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op_t1, 9, 2, 1, 1)
self.label_21 = QtWidgets.QLabel(self.centralwidget)
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 7, 0, 1, 1)
self.label_22 = QtWidgets.QLabel(self.centralwidget)
self.label_22.setObjectName("label_22")
self.gridLayout_3.addWidget(self.label_22, 9, 0, 1, 1)
self.text_filter_par6 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par6.sizePolicy().hasHeightForWidth())
self.text_filter_par6.setSizePolicy(sizePolicy)
self.text_filter_par6.setText("")
self.text_filter_par6.setObjectName("text_filter_par6")
self.gridLayout_3.addWidget(self.text_filter_par6, 7, 3, 1, 2)
self.comboBox_filter_op3 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_filter_op3.setObjectName("comboBox_filter_op3")
self.comboBox_filter_op3.addItem("")
self.comboBox_filter_op3.addItem("")
self.comboBox_filter_op3.addItem("")
self.comboBox_filter_op3.addItem("")
self.comboBox_filter_op3.addItem("")
self.comboBox_filter_op3.addItem("")
self.gridLayout_3.addWidget(self.comboBox_filter_op3, 4, 2, 1, 1)
self.comboBox_filter_par1 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par1.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par1.setSizePolicy(sizePolicy)
self.comboBox_filter_par1.setObjectName("comboBox_filter_par1")
self.gridLayout_3.addWidget(self.comboBox_filter_par1, 2, 1, 1, 1)
self.label_27 = QtWidgets.QLabel(self.centralwidget)
self.label_27.setObjectName("label_27")
self.gridLayout_3.addWidget(self.label_27, 1, 6, 1, 1)
self.label_23 = QtWidgets.QLabel(self.centralwidget)
self.label_23.setObjectName("label_23")
self.gridLayout_3.addWidget(self.label_23, 10, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setObjectName("label_7")
self.gridLayout_3.addWidget(self.label_7, 2, 0, 1, 1)
self.comboBox_filter_par6 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_par6.sizePolicy().hasHeightForWidth())
self.comboBox_filter_par6.setSizePolicy(sizePolicy)
self.comboBox_filter_par6.setObjectName("comboBox_filter_par6")
self.gridLayout_3.addWidget(self.comboBox_filter_par6, 7, 1, 1, 1)
self.comboBox_filter_time1 = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_filter_time1.sizePolicy().hasHeightForWidth())
self.comboBox_filter_time1.setSizePolicy(sizePolicy)
self.comboBox_filter_time1.setObjectName("comboBox_filter_time1")
self.gridLayout_3.addWidget(self.comboBox_filter_time1, 9, 1, 1, 1)
spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem10, 18, 3, 1, 1)
self.checkBox_filter_cutoff = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_filter_cutoff.setObjectName("checkBox_filter_cutoff")
self.gridLayout_3.addWidget(self.checkBox_filter_cutoff, 14, 6, 1, 1)
self.label_36 = QtWidgets.QLabel(self.centralwidget)
self.label_36.setObjectName("label_36")
self.gridLayout_3.addWidget(self.label_36, 10, 5, 1, 1)
self.label_35 = QtWidgets.QLabel(self.centralwidget)
self.label_35.setObjectName("label_35")
self.gridLayout_3.addWidget(self.label_35, 2, 5, 1, 1)
self.label_41 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setStrikeOut(False)
self.label_41.setFont(font)
self.label_41.setObjectName("label_41")
self.gridLayout_3.addWidget(self.label_41, 11, 0, 3, 1)
self.checkBox_filter_firstrow = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_filter_firstrow.setObjectName("checkBox_filter_firstrow")
self.gridLayout_3.addWidget(self.checkBox_filter_firstrow, 11, 6, 3, 1)
self.label_45 = QtWidgets.QLabel(self.centralwidget)
self.label_45.setObjectName("label_45")
self.gridLayout_3.addWidget(self.label_45, 16, 5, 1, 1)
self.label_42 = QtWidgets.QLabel(self.centralwidget)
self.label_42.setObjectName("label_42")
self.gridLayout_3.addWidget(self.label_42, 12, 5, 1, 1)
self.checkbox_filter_5 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_5.setObjectName("checkbox_filter_5")
self.gridLayout_3.addWidget(self.checkbox_filter_5, 6, 6, 1, 1)
self.text_filter_time1 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_time1.sizePolicy().hasHeightForWidth())
self.text_filter_time1.setSizePolicy(sizePolicy)
self.text_filter_time1.setText("")
self.text_filter_time1.setObjectName("text_filter_time1")
self.gridLayout_3.addWidget(self.text_filter_time1, 9, 3, 1, 2)
self.label_40 = QtWidgets.QLabel(self.centralwidget)
self.label_40.setObjectName("label_40")
self.gridLayout_3.addWidget(self.label_40, 14, 5, 1, 1)
self.checkbox_filter_t2 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_t2.setObjectName("checkbox_filter_t2")
self.gridLayout_3.addWidget(self.checkbox_filter_t2, 10, 6, 1, 1)
self.checkBox_filter_repeat = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_filter_repeat.setObjectName("checkBox_filter_repeat")
self.gridLayout_3.addWidget(self.checkBox_filter_repeat, 16, 6, 1, 1)
self.label_44 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setStrikeOut(False)
self.label_44.setFont(font)
self.label_44.setObjectName("label_44")
self.gridLayout_3.addWidget(self.label_44, 16, 0, 1, 1)
self.checkbox_filter_3 = QtWidgets.QCheckBox(self.centralwidget)
self.checkbox_filter_3.setObjectName("checkbox_filter_3")
self.gridLayout_3.addWidget(self.checkbox_filter_3, 4, 6, 1, 1)
spacerItem11 = QtWidgets.QSpacerItem(215, 13, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem11, 11, 5, 1, 1)
self.text_filter_par5 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par5.sizePolicy().hasHeightForWidth())
self.text_filter_par5.setSizePolicy(sizePolicy)
self.text_filter_par5.setText("")
self.text_filter_par5.setObjectName("text_filter_par5")
self.gridLayout_3.addWidget(self.text_filter_par5, 6, 3, 1, 2)
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setObjectName("label_11")
self.gridLayout_3.addWidget(self.label_11, 1, 3, 1, 2)
self.text_filter_time2 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_time2.sizePolicy().hasHeightForWidth())
self.text_filter_time2.setSizePolicy(sizePolicy)
self.text_filter_time2.setText("")
self.text_filter_time2.setObjectName("text_filter_time2")
self.gridLayout_3.addWidget(self.text_filter_time2, 10, 3, 1, 2)
self.text_filter_par4 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par4.sizePolicy().hasHeightForWidth())
self.text_filter_par4.setSizePolicy(sizePolicy)
self.text_filter_par4.setText("")
self.text_filter_par4.setObjectName("text_filter_par4")
self.gridLayout_3.addWidget(self.text_filter_par4, 5, 3, 1, 2)
self.text_filter_par3 = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_filter_par3.sizePolicy().hasHeightForWidth())
self.text_filter_par3.setSizePolicy(sizePolicy)
self.text_filter_par3.setText("")
self.text_filter_par3.setObjectName("text_filter_par3")
self.gridLayout_3.addWidget(self.text_filter_par3, 4, 3, 1, 2)
self.text_filter_cutoff = QtWidgets.QLineEdit(self.centralwidget)
self.text_filter_cutoff.setObjectName("text_filter_cutoff")
self.gridLayout_3.addWidget(self.text_filter_cutoff, 14, 3, 1, 2)
self.label_39 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setStrikeOut(False)
self.label_39.setFont(font)
self.label_39.setObjectName("label_39")
self.gridLayout_3.addWidget(self.label_39, 14, 0, 1, 1)
self.label_17 = QtWidgets.QLabel(self.centralwidget)
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 5, 0, 1, 1)
self.label_26 = QtWidgets.QLabel(self.centralwidget)
self.label_26.setObjectName("label_26")
self.gridLayout_3.addWidget(self.label_26, 1, 2, 1, 1)
self.text_filter_firstrow = QtWidgets.QLineEdit(self.centralwidget)
self.text_filter_firstrow.setObjectName("text_filter_firstrow")
self.gridLayout_3.addWidget(self.text_filter_firstrow, 11, 3, 3, 2)
self.label_48 = QtWidgets.QLabel(self.centralwidget)
self.label_48.setObjectName("label_48")
self.gridLayout_3.addWidget(self.label_48, 15, 0, 1, 1)
self.label_49 = QtWidgets.QLabel(self.centralwidget)
self.label_49.setObjectName("label_49")
self.gridLayout_3.addWidget(self.label_49, 15, 5, 1, 1)
self.checkBox_filter_jumppoints = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_filter_jumppoints.setObjectName("checkBox_filter_jumppoints")
self.gridLayout_3.addWidget(self.checkBox_filter_jumppoints, 15, 6, 1, 1)
self.text_filter_jumpborder = QtWidgets.QLineEdit(self.centralwidget)
self.text_filter_jumpborder.setObjectName("text_filter_jumpborder")
self.gridLayout_3.addWidget(self.text_filter_jumpborder, 15, 3, 1, 2)
self.bt_applyfilter = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
self.bt_applyfilter.setFont(font)
self.bt_applyfilter.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_applyfilter.setObjectName("bt_applyfilter")
self.gridLayout_3.addWidget(self.bt_applyfilter, 19, 3, 2, 3)
self.bt_undofilter = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
self.bt_undofilter.setFont(font)
self.bt_undofilter.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_undofilter.setObjectName("bt_undofilter")
self.gridLayout_3.addWidget(self.bt_undofilter, 19, 6, 2, 1)
self.verticalLayout.addLayout(self.gridLayout_3)
spacerItem12 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem12)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_Maptype = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_Maptype.setFont(font)
self.label_Maptype.setObjectName("label_Maptype")
self.gridLayout_2.addWidget(self.label_Maptype, 10, 0, 1, 1)
self.label_33 = QtWidgets.QLabel(self.centralwidget)
self.label_33.setObjectName("label_33")
self.gridLayout_2.addWidget(self.label_33, 4, 2, 1, 1)
self.label_31 = QtWidgets.QLabel(self.centralwidget)
self.label_31.setObjectName("label_31")
self.gridLayout_2.addWidget(self.label_31, 5, 2, 1, 2)
self.text_map_jumpborder_mark = QtWidgets.QLineEdit(self.centralwidget)
self.text_map_jumpborder_mark.setObjectName("text_map_jumpborder_mark")
self.gridLayout_2.addWidget(self.text_map_jumpborder_mark, 4, 1, 1, 1)
self.checkBox_cluster = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_cluster.setObjectName("checkBox_cluster")
self.gridLayout_2.addWidget(self.checkBox_cluster, 2, 0, 1, 2)
self.bt_Mapping = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(75)
self.bt_Mapping.setFont(font)
self.bt_Mapping.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Mapping.setObjectName("bt_Mapping")
self.gridLayout_2.addWidget(self.bt_Mapping, 13, 0, 1, 4)
self.comboBox_tooltip_1 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_tooltip_1.setObjectName("comboBox_tooltip_1")
self.gridLayout_2.addWidget(self.comboBox_tooltip_1, 7, 1, 1, 3)
self.text_Mapname = QtWidgets.QLineEdit(self.centralwidget)
self.text_Mapname.setObjectName("text_Mapname")
self.gridLayout_2.addWidget(self.text_Mapname, 11, 1, 1, 3)
self.checkBox_jump = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_jump.setObjectName("checkBox_jump")
self.gridLayout_2.addWidget(self.checkBox_jump, 5, 0, 1, 1)
spacerItem13 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem13, 12, 0, 1, 1)
self.text_jumprate = QtWidgets.QLineEdit(self.centralwidget)
self.text_jumprate.setObjectName("text_jumprate")
self.gridLayout_2.addWidget(self.text_jumprate, 5, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 11, 0, 1, 1)
self.comboBox_tooltip_2 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_tooltip_2.setObjectName("comboBox_tooltip_2")
self.gridLayout_2.addWidget(self.comboBox_tooltip_2, 8, 1, 1, 3)
self.comboBox_Maptype = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_Maptype.setObjectName("comboBox_Maptype")
self.comboBox_Maptype.addItem("")
self.comboBox_Maptype.addItem("")
self.comboBox_Maptype.addItem("")
self.gridLayout_2.addWidget(self.comboBox_Maptype, 10, 1, 1, 3)
self.label_6 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 0, 0, 1, 1)
self.label_38 = QtWidgets.QLabel(self.centralwidget)
self.label_38.setObjectName("label_38")
self.gridLayout_2.addWidget(self.label_38, 12, 1, 1, 3)
self.label_47 = QtWidgets.QLabel(self.centralwidget)
self.label_47.setObjectName("label_47")
self.gridLayout_2.addWidget(self.label_47, 6, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 7, 0, 1, 1)
self.checkBox_jumpborder_mark = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_jumpborder_mark.setChecked(True)
self.checkBox_jumpborder_mark.setObjectName("checkBox_jumpborder_mark")
self.gridLayout_2.addWidget(self.checkBox_jumpborder_mark, 4, 0, 1, 1)
self.checkBox_Marker = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_Marker.setChecked(False)
self.checkBox_Marker.setObjectName("checkBox_Marker")
self.gridLayout_2.addWidget(self.checkBox_Marker, 1, 0, 1, 2)
self.label_46 = QtWidgets.QLabel(self.centralwidget)
self.label_46.setObjectName("label_46")
self.gridLayout_2.addWidget(self.label_46, 6, 2, 1, 2)
self.label_50 = QtWidgets.QLabel(self.centralwidget)
self.label_50.setObjectName("label_50")
self.gridLayout_2.addWidget(self.label_50, 4, 3, 1, 1)
self.label_34 = QtWidgets.QLabel(self.centralwidget)
self.label_34.setObjectName("label_34")
self.gridLayout_2.addWidget(self.label_34, 1, 2, 1, 2)
self.label_37 = QtWidgets.QLabel(self.centralwidget)
self.label_37.setObjectName("label_37")
self.gridLayout_2.addWidget(self.label_37, 2, 2, 1, 2)
self.label_43 = QtWidgets.QLabel(self.centralwidget)
self.label_43.setObjectName("label_43")
self.gridLayout_2.addWidget(self.label_43, 3, 2, 1, 2)
self.checkBox_Polyline = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_Polyline.setChecked(True)
self.checkBox_Polyline.setObjectName("checkBox_Polyline")
self.gridLayout_2.addWidget(self.checkBox_Polyline, 3, 0, 1, 2)
self.horizontalLayout_2.addLayout(self.gridLayout_2)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 5, 0, 1, 1)
self.checkBox_save_jumppoints_to_xlsx = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_save_jumppoints_to_xlsx.setObjectName("checkBox_save_jumppoints_to_xlsx")
self.gridLayout.addWidget(self.checkBox_save_jumppoints_to_xlsx, 6, 0, 1, 2)
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 2, 0, 1, 1)
self.comboBox_datasave_format = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_datasave_format.setObjectName("comboBox_datasave_format")
self.comboBox_datasave_format.addItem("")
self.comboBox_datasave_format.addItem("")
self.gridLayout.addWidget(self.comboBox_datasave_format, 5, 1, 1, 1)
self.bt_savefiltered = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
self.bt_savefiltered.setFont(font)
self.bt_savefiltered.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_savefiltered.setObjectName("bt_savefiltered")
self.gridLayout.addWidget(self.bt_savefiltered, 8, 1, 1, 2)
self.save_text_jumpborder = QtWidgets.QLineEdit(self.centralwidget)
self.save_text_jumpborder.setObjectName("save_text_jumpborder")
self.gridLayout.addWidget(self.save_text_jumpborder, 7, 1, 1, 1)
self.label_24 = QtWidgets.QLabel(self.centralwidget)
self.label_24.setObjectName("label_24")
self.gridLayout.addWidget(self.label_24, 1, 2, 1, 2)
self.text_datasave_seperator = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_datasave_seperator.sizePolicy().hasHeightForWidth())
self.text_datasave_seperator.setSizePolicy(sizePolicy)
self.text_datasave_seperator.setObjectName("text_datasave_seperator")
self.gridLayout.addWidget(self.text_datasave_seperator, 1, 1, 1, 1)
spacerItem14 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem14, 9, 1, 1, 1)
self.label_52 = QtWidgets.QLabel(self.centralwidget)
self.label_52.setObjectName("label_52")
self.gridLayout.addWidget(self.label_52, 7, 2, 1, 1)
self.text_datasave_filename = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.text_datasave_filename.sizePolicy().hasHeightForWidth())
self.text_datasave_filename.setSizePolicy(sizePolicy)
self.text_datasave_filename.setObjectName("text_datasave_filename")
self.gridLayout.addWidget(self.text_datasave_filename, 2, 1, 1, 2)
self.label_12 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 0, 0, 1, 2)
self.label_51 = QtWidgets.QLabel(self.centralwidget)
self.label_51.setObjectName("label_51")
self.gridLayout.addWidget(self.label_51, 7, 0, 1, 1)
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 1, 0, 1, 1)
self.horizontalLayout_2.addLayout(self.gridLayout)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_15 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.verticalLayout_2.addWidget(self.label_15)
self.TableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.TableWidget.setToolTip("")
self.TableWidget.setAutoFillBackground(False)
self.TableWidget.setObjectName("TableWidget")
self.TableWidget.setColumnCount(0)
self.TableWidget.setRowCount(0)
self.verticalLayout_2.addWidget(self.TableWidget)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_30 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
self.label_30.setFont(font)
self.label_30.setObjectName("label_30")
self.horizontalLayout.addWidget(self.label_30)
self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lcdNumber.setFont(font)
self.lcdNumber.setObjectName("lcdNumber")
self.horizontalLayout.addWidget(self.lcdNumber)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.bt_cleardata = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
self.bt_cleardata.setFont(font)
self.bt_cleardata.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_cleardata.setToolTip("")
self.bt_cleardata.setObjectName("bt_cleardata")
self.verticalLayout_2.addWidget(self.bt_cleardata)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
lcd_display.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(lcd_display)
self.menubar.setGeometry(QtCore.QRect(0, 0, 2672, 38))
self.menubar.setObjectName("menubar")
lcd_display.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(lcd_display)
self.statusbar.setObjectName("statusbar")
lcd_display.setStatusBar(self.statusbar)
self.retranslateUi(lcd_display)
self.text_filepath.returnPressed.connect(self.bt_fileread.animateClick)
self.text_datasave_filename.returnPressed.connect(self.bt_savefiltered.animateClick)
self.text_Mapname.returnPressed.connect(self.bt_Mapping.animateClick)
self.text_filter_par1.returnPressed.connect(self.checkbox_filter_1.animateClick)
self.text_filter_par2.returnPressed.connect(self.checkbox_filter_2.animateClick)
self.text_filter_par3.returnPressed.connect(self.checkbox_filter_3.animateClick)
self.text_filter_par4.returnPressed.connect(self.checkbox_filter_4.animateClick)
self.text_filter_par5.returnPressed.connect(self.checkbox_filter_5.animateClick)
self.text_filter_par6.returnPressed.connect(self.checkbox_filter_6.animateClick)
self.text_filter_time1.returnPressed.connect(self.checkbox_filter_t1.animateClick)
self.text_filter_time2.returnPressed.connect(self.checkbox_filter_t2.animateClick)
self.text_jumprate.returnPressed.connect(self.checkBox_jump.animateClick)
self.text_filter_firstrow.returnPressed.connect(self.checkBox_filter_firstrow.animateClick)
self.text_filter_cutoff.returnPressed.connect(self.checkBox_filter_cutoff.animateClick)
self.text_filter_jumpborder.returnPressed.connect(self.checkBox_filter_jumppoints.animateClick)
self.text_map_jumpborder_mark.returnPressed.connect(self.checkBox_jumpborder_mark.animateClick)
QtCore.QMetaObject.connectSlotsByName(lcd_display)
lcd_display.setTabOrder(self.text_filepath, self.bt_fileread)
lcd_display.setTabOrder(self.bt_fileread, self.QComboBox_Latetude_val)
lcd_display.setTabOrder(self.QComboBox_Latetude_val, self.QComboBox_Longetude_val)
lcd_display.setTabOrder(self.QComboBox_Longetude_val, self.QComboBox_Timstamp)
lcd_display.setTabOrder(self.QComboBox_Timstamp, self.bt_sort)
lcd_display.setTabOrder(self.bt_sort, self.comboBox_filter_par1)
lcd_display.setTabOrder(self.comboBox_filter_par1, self.comboBox_filter_op1)
lcd_display.setTabOrder(self.comboBox_filter_op1, self.text_filter_par1)
lcd_display.setTabOrder(self.text_filter_par1, self.checkbox_filter_1)
lcd_display.setTabOrder(self.checkbox_filter_1, self.bt_applyfilter)
lcd_display.setTabOrder(self.bt_applyfilter, self.checkbox_filter_6)
lcd_display.setTabOrder(self.checkbox_filter_6, self.checkBox_cluster)
lcd_display.setTabOrder(self.checkBox_cluster, self.checkBox_Marker)
lcd_display.setTabOrder(self.checkBox_Marker, self.text_jumprate)
lcd_display.setTabOrder(self.text_jumprate, self.checkBox_jump)
lcd_display.setTabOrder(self.checkBox_jump, self.comboBox_tooltip_1)
lcd_display.setTabOrder(self.comboBox_tooltip_1, self.comboBox_tooltip_2)
lcd_display.setTabOrder(self.comboBox_tooltip_2, self.text_Mapname)
lcd_display.setTabOrder(self.text_Mapname, self.text_datasave_filename)
lcd_display.setTabOrder(self.text_datasave_filename, self.comboBox_datasave_format)
lcd_display.setTabOrder(self.comboBox_datasave_format, self.checkBox_save_jumppoints_to_xlsx)
lcd_display.setTabOrder(self.checkBox_save_jumppoints_to_xlsx, self.save_text_jumpborder)
lcd_display.setTabOrder(self.save_text_jumpborder, self.bt_savefiltered)
lcd_display.setTabOrder(self.bt_savefiltered, self.bt_Mapping)
lcd_display.setTabOrder(self.bt_Mapping, self.checkBox_filter_repeat)
lcd_display.setTabOrder(self.checkBox_filter_repeat, self.comboBox_filter_time2)
lcd_display.setTabOrder(self.comboBox_filter_time2, self.comboBox_filter_op_t1)
lcd_display.setTabOrder(self.comboBox_filter_op_t1, self.text_filter_par6)
lcd_display.setTabOrder(self.text_filter_par6, self.comboBox_filter_op3)
lcd_display.setTabOrder(self.comboBox_filter_op3, self.comboBox_filter_par6)
lcd_display.setTabOrder(self.comboBox_filter_par6, self.comboBox_filter_time1)
lcd_display.setTabOrder(self.comboBox_filter_time1, self.checkBox_filter_cutoff)
lcd_display.setTabOrder(self.checkBox_filter_cutoff, self.checkBox_filter_firstrow)
lcd_display.setTabOrder(self.checkBox_filter_firstrow, self.checkbox_filter_5)
lcd_display.setTabOrder(self.checkbox_filter_5, self.text_filter_time1)
lcd_display.setTabOrder(self.text_filter_time1, self.checkbox_filter_t2)
lcd_display.setTabOrder(self.checkbox_filter_t2, self.comboBox_filter_op4)
lcd_display.setTabOrder(self.comboBox_filter_op4, self.checkbox_filter_3)
lcd_display.setTabOrder(self.checkbox_filter_3, self.text_filter_par5)
lcd_display.setTabOrder(self.text_filter_par5, self.text_filter_time2)
lcd_display.setTabOrder(self.text_filter_time2, self.text_filter_par4)
lcd_display.setTabOrder(self.text_filter_par4, self.text_filter_par3)
lcd_display.setTabOrder(self.text_filter_par3, self.text_filter_cutoff)
lcd_display.setTabOrder(self.text_filter_cutoff, self.text_filter_firstrow)
lcd_display.setTabOrder(self.text_filter_firstrow, self.checkBox_filter_jumppoints)
lcd_display.setTabOrder(self.checkBox_filter_jumppoints, self.text_filter_jumpborder)
lcd_display.setTabOrder(self.text_filter_jumpborder, self.text_map_jumpborder_mark)
lcd_display.setTabOrder(self.text_map_jumpborder_mark, self.bt_undofilter)
lcd_display.setTabOrder(self.bt_undofilter, self.comboBox_filter_op2)
lcd_display.setTabOrder(self.comboBox_filter_op2, self.comboBox_filter_par2)
lcd_display.setTabOrder(self.comboBox_filter_par2, self.comboBox_filter_par3)
lcd_display.setTabOrder(self.comboBox_filter_par3, self.comboBox_Maptype)
lcd_display.setTabOrder(self.comboBox_Maptype, self.comboBox_filter_op5)
lcd_display.setTabOrder(self.comboBox_filter_op5, self.checkbox_filter_t1)
lcd_display.setTabOrder(self.checkbox_filter_t1, self.checkbox_filter_2)
lcd_display.setTabOrder(self.checkbox_filter_2, self.comboBox_filter_par5)
lcd_display.setTabOrder(self.comboBox_filter_par5, self.text_datasave_seperator)
lcd_display.setTabOrder(self.text_datasave_seperator, self.text_filter_par2)
lcd_display.setTabOrder(self.text_filter_par2, self.comboBox_filter_op6)
lcd_display.setTabOrder(self.comboBox_filter_op6, self.comboBox_filter_op_t2)
lcd_display.setTabOrder(self.comboBox_filter_op_t2, self.checkbox_filter_4)
lcd_display.setTabOrder(self.checkbox_filter_4, self.TableWidget)
lcd_display.setTabOrder(self.TableWidget, self.bt_cleardata)
lcd_display.setTabOrder(self.bt_cleardata, self.checkBox_jumpborder_mark)
lcd_display.setTabOrder(self.checkBox_jumpborder_mark, self.comboBox_filter_par4)
self.bt_Mapping.clicked.connect(self.mapping_bt)
self.bt_fileread.clicked.connect(self.readfile_bt)
self.bt_cleardata.clicked.connect(self.cleardata_bt)
self.bt_applyfilter.clicked.connect(lambda: self.filteroptions(activ=True))
self.bt_undofilter.clicked.connect(lambda: self.filteroptions(activ=False))
self.bt_sort.clicked.connect(self.sort_bt)
self.bt_savefiltered.clicked.connect(self.savefiltered_bt)
self.markerexistance = False
self.dataloaded = False
def readfile_bt(self):
        # purpose: reads data from a csv file and fills the comboboxes
        # ------
        try:
            path = self.text_filepath.text()
            self.DF = pd.read_csv(path)
self.textfilter = []
self.DF.reset_index(drop=True, inplace=True) # set working frame
self.df = self.DF
# delete all items from comboBox
self.QComboBox_Longetude_val.clear()
self.QComboBox_Latetude_val.clear()
self.comboBox_filter_par1.clear()
self.comboBox_filter_par2.clear()
self.comboBox_filter_par3.clear()
self.comboBox_filter_par4.clear()
self.comboBox_filter_par5.clear()
self.comboBox_filter_par6.clear()
self.comboBox_filter_time1.clear()
self.comboBox_filter_time2.clear()
self.comboBox_tooltip_1.clear()
self.comboBox_tooltip_2.clear()
filename = "" # prealocate Filename
# befüllen der latitude/longitude Comboboxen
for i in self.df.axes[1]:
                # Preselect timestamp
if i == 'timestamp':
self.QComboBox_Timstamp.addItem('timestamp')
self.comboBox_tooltip_1.addItem("timestamp")
                # Preselect latitude/longitude
if i == "GPS_longitude":
self.QComboBox_Longetude_val.addItem(i)
if i == "GPS_latitude":
self.QComboBox_Latetude_val.addItem(i)
for i in self.df.axes[1]:
self.QComboBox_Longetude_val.addItem(i)
self.QComboBox_Latetude_val.addItem(i)
self.comboBox_filter_par1.addItem(i)
self.comboBox_filter_par2.addItem(i)
self.comboBox_filter_par3.addItem(i)
self.comboBox_filter_par4.addItem(i)
self.comboBox_filter_par5.addItem(i)
self.comboBox_filter_par6.addItem(i)
self.comboBox_filter_time1.addItem(i)
self.comboBox_filter_time2.addItem(i)
self.comboBox_tooltip_2.addItem(i)
if i == "objectid":
# Preselects a filename
filename = filename + str(self.df.loc[0, "objectid"]) + "__"
if i == 'timestamp':
self.DF = self.DF.sort_values(by=['timestamp'])
else:
self.QComboBox_Timstamp.addItem(i)
self.comboBox_tooltip_1.addItem(i)
if i == "proc_date":
# Preselects a Filename
filename = filename + str(self.df.loc[0, "proc_date"]) + "__" + str(self.df.loc[len(self.df.index) - 1, "proc_date"]) + "__"
if i != "GPS_latitude":
self.QComboBox_Longetude_val.addItem(i)
if i != "GPS_longitude":
self.QComboBox_Latetude_val.addItem(i)
            # check that no error occurred
self.markerexistance = True
self.dataloaded = True
self.text_Mapname.setText(filename) # Preselects a Map Filename
self.text_datasave_filename.setText("filtereddata" + "_" + filename) # Preselects a Data Filename
except (IOError, NameError, FileNotFoundError) as e:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(str(e))
msg.setWindowTitle("WARNING")
msg.exec_()
except:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle("WARNING")
msg.setText("Unknown Error, (No IOError, FileNotFoundError ")
msg.exec_()
self.tablefiller()
def tablefiller(self):
        # purpose: fills the TableWidget with the current data
# ------
self.df.reset_index(drop=True, inplace=True)
columnames = self.df.columns # gets columnames of current Data
indexnames = self.df.index # gets current index
self.lcdNumber.display(len(indexnames)) # shows current linenumber in a LCD display
# fills table with current Data
self.TableWidget.setColumnCount(len(columnames))
if len(indexnames) > 100:
self.TableWidget.setRowCount(100)
indexlaenge = 100
else:
self.TableWidget.setRowCount(len(indexnames))
indexlaenge = len(indexnames)
        horHeaders = list(columnames)
        for i in range(0, indexlaenge):
            for j in range(0, len(columnames)):
                a = self.df.loc[indexnames[i]][columnames[j]]
                self.TableWidget.setItem(i, j, QTableWidgetItem(str(a)))
        self.TableWidget.setHorizontalHeaderLabels(horHeaders)
def cleardata_bt(self):
        # purpose: clears the data
# ------
self.QComboBox_Longetude_val.clear() # delete all items from comboBox
self.QComboBox_Latetude_val.clear() # delete all items from comboBox
self.comboBox_filter_par1.clear()
self.comboBox_filter_par2.clear()
self.comboBox_filter_par3.clear()
self.comboBox_filter_par4.clear()
self.comboBox_filter_par5.clear()
self.comboBox_filter_par6.clear()
self.comboBox_filter_time1.clear()
self.comboBox_filter_time2.clear()
        self.comboBox_tooltip_1.clear()
        self.comboBox_tooltip_2.clear()
self.df = pd.DataFrame()
self.tablefiller()
self.markerexistance = False
self.dataloaded = False
def sort_bt(self):
        # purpose: sorts the values by the selected column
# ------
try:
sortname = self.QComboBox_Timstamp.currentText()
self.DF = self.DF.sort_values(by=[sortname])
self.df = self.df.sort_values(by=[sortname])
self.df.reset_index(drop=True, inplace=True)
self.DF.reset_index(drop=True, inplace=True)
self.tablefiller()
except (IOError, NameError, FileNotFoundError, TypeError, ValueError) as e:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(str(e))
msg.setWindowTitle("WARNING")
msg.exec_()
except:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Unknown Error, (No IOError, NameError, FileNotFoundError,TypeError or ValueError ")
msg.setWindowTitle("WARNING")
msg.exec_()
    def filteroptions(self, activ):
        # purpose: filters the data according to the filter options
        # ------
        # Input Parameter:
        # ---------------
        # activ....[boolean] if True  -> filter options get applied
        #                    if False -> filter options get undone
if activ: # Apply filter button
self.df = self.DF
def filtering(operator, DataFr, argument, i_par):
                # apply the selected comparison operator and return the matching rows
if operator == '<':
x = DataFr.loc[DataFr[i_par] < argument]
return x
elif operator == '>':
x = DataFr.loc[DataFr[i_par] > argument]
return x
elif operator == '<=':
x = DataFr.loc[DataFr[i_par] <= argument]
return x
elif operator == '>=':
x = DataFr.loc[DataFr[i_par] >= argument]
return x
elif operator == '==':
x = DataFr.loc[DataFr[i_par] == argument]
return x
elif operator == '!=':
x = DataFr.loc[DataFr[i_par] != argument]
return x
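            # A minimal alternative sketch (not used by this method): the same dispatch
            # could be expressed with the standard `operator` module instead of the
            # if/elif chain, assuming the same operator strings as in the comboboxes:
            #
            #     import operator as _op
            #     _OPS = {'<': _op.lt, '>': _op.gt, '<=': _op.le,
            #             '>=': _op.ge, '==': _op.eq, '!=': _op.ne}
            #
            #     def filtering(op_symbol, DataFr, argument, i_par):
            #         return DataFr.loc[_OPS[op_symbol](DataFr[i_par], argument)]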
try:
                # read the selected fields from the UI
                i_par1 = self.comboBox_filter_par1.currentText()  # column selection
i_par2 = self.comboBox_filter_par2.currentText()
i_par3 = self.comboBox_filter_par3.currentText()
i_par4 = self.comboBox_filter_par4.currentText()
i_par5 = self.comboBox_filter_par5.currentText()
i_par6 = self.comboBox_filter_par6.currentText()
i_time1 = self.comboBox_filter_time1.currentText()
i_time2 = self.comboBox_filter_time2.currentText()
                op1 = self.comboBox_filter_op1.currentText()  # operator selection
op2 = self.comboBox_filter_op2.currentText()
op3 = self.comboBox_filter_op3.currentText()
op4 = self.comboBox_filter_op4.currentText()
op5 = self.comboBox_filter_op5.currentText()
op6 = self.comboBox_filter_op6.currentText()
op_t1 = self.comboBox_filter_op_t1.currentText()
op_t2 = self.comboBox_filter_op_t2.currentText()
                arg1 = self.text_filter_par1.text()  # filter argument
arg2 = self.text_filter_par2.text()
arg3 = self.text_filter_par3.text()
arg4 = self.text_filter_par4.text()
arg5 = self.text_filter_par5.text()
arg6 = self.text_filter_par6.text()
                # time filter
                arg_t1 = self.text_filter_time1.text()
                arg_t2 = self.text_filter_time2.text()
                # cutoff filter (first row / last row)
                first = self.text_filter_firstrow.text()
                last = self.text_filter_cutoff.text()
                # preallocate the filter settings log
                self.textfilter = []
                # apply the filters
if self.checkbox_filter_1.isChecked():
x = filtering(op1, self.df, float(arg1), i_par1) # filter data
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par1 + " " + op1 + " " + arg1) # save text for filtersettings.txt
if self.checkbox_filter_2.isChecked():
x = filtering(op2, self.df, float(arg2), i_par2)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par2 + " " + op2 + " " + arg2)
if self.checkbox_filter_3.isChecked():
x = filtering(op3, self.df, float(arg3), i_par3)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par3 + " " + op3 + " " + arg3)
if self.checkbox_filter_4.isChecked():
x = filtering(op4, self.df, float(arg4), i_par4)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par4 + " " + op4 + " " + arg4)
if self.checkbox_filter_5.isChecked():
x = filtering(op5, self.df, float(arg5), i_par5)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par5 + " " + op5 + " " + arg5)
if self.checkbox_filter_6.isChecked():
x = filtering(op6, self.df, float(arg6), i_par6)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_par6 + " " + op6 + " " + arg6)
if self.checkbox_filter_t1.isChecked(): # Timestamp Filter
x = filtering(op_t1, self.df, arg_t1, i_time1)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_time1 + " " + op_t1 + " " + arg_t1)
if self.checkbox_filter_t2.isChecked(): # Timestamp Filter
x = filtering(op_t2, self.df, arg_t2, i_time2)
del(self.df)
self.df = x
del(x)
self.textfilter.append(i_time2 + " " + op_t2 + " " + arg_t2)
self.df.reset_index(drop=True, inplace=True) # reset index for potential filtering
if self.checkBox_filter_jumppoints.isChecked():
jumpborder = float(self.text_filter_jumpborder.text()) # [km] #set classification for a jump
jumppoints = self.findjumps(jumpborder)
for i in jumppoints.index.values:
self.df = self.df.drop(i)
self.df.reset_index(drop=True, inplace=True)
self.textfilter.append("jumpfilter = True, jumpborder = " + str(jumpborder) + "km")
if self.checkBox_filter_repeat.isChecked(): # Filter for repeating Datapoints
self.df.reset_index(drop=True, inplace=True)
x = self.df
del(self.df)
ilat = self.QComboBox_Latetude_val.currentText()
ilon = self.QComboBox_Longetude_val.currentText()
un = x.loc[:, [ilon, ilat]].drop_duplicates()
self.df = x.iloc[un.index, :]
del(x)
self.textfilter.append("repeatfilter = True")
# Cutoff Filter Cuts at firstrow and at lastrow
if self.checkBox_filter_firstrow.isChecked() | self.checkBox_filter_cutoff.isChecked():
if (self.checkBox_filter_firstrow.isChecked()):
firstrow = int(first)
else:
firstrow = 1
if (self.checkBox_filter_cutoff.isChecked()):
lastrow = int(last)
else:
lastrow = len(self.df.index)
if (firstrow) < (lastrow):
if lastrow <= len(self.df.index):
x = self.df
del(self.df)
self.df = x.iloc[firstrow:lastrow, :]
del(x)
self.textfilter.append("firstrow = " + str(firstrow))
self.textfilter.append("lastrow = " + str(lastrow))
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("ERROR: lastrow > data length")
msg.setWindowTitle("ERROR")
msg.exec_()
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("ERROR: firstrow >= lastrow")
msg.setWindowTitle("ERROR")
msg.exec_()
self.df.reset_index(drop=True, inplace=True) # resets indexes of the Dataframe to 1,2,3,4,...
                # give a warning if all points have been filtered out and switch markers off
if len(self.df.index) == 0:
self.markerexistance = False
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("WARNING: The selected data range is empty")
msg.setWindowTitle("WARNING")
msg.exec_()
                else:  # if the filtered data is not empty, switch markers on
self.markerexistance = True
except (IOError, NameError, FileNotFoundError, TypeError, ValueError) as e:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(str(e))
msg.setWindowTitle("WARNING")
msg.exec_()
except:
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Unknown Error, (No IOError, NameError, FileNotFoundError,TypeError or ValueError ")
msg.setWindowTitle("WARNING")
msg.exec_()
        else:  # undo filter button: resets the dataset
self.df = self.DF
self.textfilter = []
self.tablefiller()
    def findjumps(self, jumpborder):
        # purpose: finds points which jump farther than a given distance threshold
        # -------
        #
        # Input: jumpborder ... float, distance threshold in km used to classify jump points
        # ------
        #
        # Output: jumppoints ... DataFrame which contains the jump points with their index positions
        # -----
try:
ilat = self.QComboBox_Latetude_val.currentText()
ilon = self.QComboBox_Longetude_val.currentText()
gps = self.df.loc[:, [ilat, ilon]] # build working dataframe
dlat = np.diff(gps.loc[:, ilat])
dlon = np.diff(gps.loc[:, ilon])
# create distance vector and detect jumps
d = np.sqrt(dlat**2 + dlon**2)
            distance = pd.Series(data=d)
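            # A minimal sketch of how such a distance series can be turned into jump flags,
            # assuming planar lat/lon coordinates in degrees and a rough 111 km-per-degree
            # conversion (the names below are illustrative only):
            #
            #     dist_km = distance * 111.0           # rough degrees-to-km approximation
            #     jump_mask = dist_km > jumpborder     # True where consecutive points jump
            #     jumppoints = gps.iloc[1:][jump_mask.values]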
import pandas as pd
import pytest
from toucan_data_sdk.utils.postprocess import (
add, subtract, multiply, divide
)
def test_math_operations_with_column():
""" It should return result for basic math operations with a column name"""
data = pd.DataFrame([{'value1': 10, 'value2': 20},
{'value1': 17, 'value2': 5}])
kwargs = {'new_column': 'result', 'column_1': 'value1', 'column_2': 'value2'}
res = add(data, **kwargs)
expected_col = [30, 22]
assert res['result'].tolist() == expected_col
res = subtract(data, **kwargs)
expected_col = [-10, 12]
assert res['result'].tolist() == expected_col
res = multiply(data, **kwargs)
expected_col = [200, 85]
assert res['result'].tolist() == expected_col
res = divide(data, **kwargs)
expected_col = [.5, 3.4]
assert res['result'].tolist() == expected_col
def test_math_operations_with_number():
""" It should return result for basic math operations with a constant number"""
data = pd.DataFrame([{'value1': 10}, {'value1': 17}])
kwargs = {'new_column': 'value1', 'column_1': 'value1', 'column_2': .25}
res = add(data.copy(), **kwargs)
expected_col = [10.25, 17.25]
assert res['value1'].tolist() == expected_col
res = subtract(data.copy(), **kwargs)
expected_col = [9.75, 16.75]
assert res['value1'].tolist() == expected_col
res = multiply(data.copy(), **kwargs)
expected_col = [2.5, 4.25]
assert res['value1'].tolist() == expected_col
res = divide(data.copy(), **kwargs)
expected_col = [40.0, 68.0]
assert res['value1'].tolist() == expected_col
data = pd.DataFrame([{'value1': 10}, {'value1': 25}])
kwargs = {'new_column': 'result', 'column_1': 2, 'column_2': 'value1'}
res = add(data.copy(), **kwargs)
expected_col = [12, 27]
assert res['result'].tolist() == expected_col
res = divide(data.copy(), **kwargs)
expected_col = [.2, .08]
assert res['result'].tolist() == expected_col
def test_bad_arg():
""" It should raise an error when calling a math operation with a bad parameter """
data = pd.DataFrame([{'value1': 10}, {'value1': 17}])
kwargs = {'new_column': 'value1', 'column_1': 'value1', 'column_2': [1, 2]}
with pytest.raises(TypeError) as exc_info:
add(data.copy(), **kwargs)
assert str(exc_info.value) == 'column_2 must be a string, an integer or a float'
    data = pd.DataFrame([{'value1': 10}, {'value1': 17}])
"""
Utils to plot graphs with arrows
"""
import matplotlib.transforms
import matplotlib.patches
import matplotlib.colors
import matplotlib.cm
import numpy as np
import pandas as pd
import logging
from tctx.util import plot
def _clip_arrows(arrows, tail_offset, head_offset):
"""
shorten head & tail so the arrows don't overlap with markers
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:return: 2 numpy arrays of shape Nx2
"""
source_pos = arrows[['source_x', 'source_y']].values
target_pos = arrows[['target_x', 'target_y']].values
direction = target_pos - source_pos
length = np.sqrt(np.sum(np.square(direction), axis=1))
direction = direction / length[:, np.newaxis]
source_pos = source_pos + direction * tail_offset
target_pos = target_pos + direction * (-1 * head_offset)
return source_pos, target_pos
def plot_arrows_cmap(
ax, arrows, c, cmap=None, norm=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a colormap.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param c: a pd.Series with the same index as arrows or a string that identifies a column in it.
    :param cmap: colormap name or instance; defaults to the 'default' style colormap
    :param norm: matplotlib normalization; defaults to the min/max of c
    :param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
    :param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
    :param head_length: length of the arrow head
    :param head_width: width of the arrow head
    :param kwargs: args for matplotlib.patches.FancyArrowPatch
    :return: matplotlib.cm.ScalarMappable that can be used for a colorbar
    """
if cmap is None:
cmap = 'default'
if isinstance(cmap, str):
cmap = plot.lookup_cmap(cmap)
if isinstance(c, str):
c = arrows[c]
if norm is None:
norm = matplotlib.colors.Normalize(vmin=c.min(), vmax=c.max())
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
color = cmap(norm(c[idx]))
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
sm = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(c.values)
return sm
def _plot_single_arrow(ax, source_pos, target_pos, arrowstyle, color, **kwargs):
patch_kwargs = kwargs.copy()
patch_kwargs.setdefault('edgecolor', color)
patch_kwargs.setdefault('facecolor', color)
patch = matplotlib.patches.FancyArrowPatch(
posA=source_pos,
posB=target_pos,
arrowstyle=arrowstyle,
**patch_kwargs,
)
ax.add_artist(patch)
def plot_arrows_solid(
ax, arrows, color=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a solid color.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
    :param color: solid color used for all arrows
    :param head_length: length of the arrow head
    :param head_width: width of the arrow head
    :param kwargs: args for matplotlib.patches.FancyArrowPatch
    :return: None
    """
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
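# A minimal usage sketch for the two arrow helpers above, assuming only matplotlib and
# pandas; the column names follow the docstrings ('source_x', 'source_y', 'target_x',
# 'target_y') and the figure/axis objects are created ad hoc:
#
#     import matplotlib.pyplot as plt
#     import pandas as pd
#
#     arrows = pd.DataFrame({
#         'source_x': [0.0, 1.0], 'source_y': [0.0, 0.0],
#         'target_x': [1.0, 2.0], 'target_y': [1.0, 0.5],
#     })
#     fig, ax = plt.subplots()
#     plot_arrows_solid(ax, arrows, color='k', head_offset=0.05)
#     ax.set_xlim(-0.5, 2.5)
#     ax.set_ylim(-0.5, 1.5)
#
# plot_arrows_cmap works the same way but takes a per-arrow value series `c` and
# returns a ScalarMappable that can be passed to fig.colorbar.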
class Graph:
"""
A class to plot graphs with per-node and per-edge styles
"""
def __init__(self, nodes, edges, styles=None, transform=None, kwargs_nodes=None, kwargs_edges=None):
"""
:param nodes: a pd.DataFrame with columns ['x', 'y'] representing the 2d position and
column 'style' that can be indexed into the styles DF
:param edges: a pd.DataFrame with columns ['source', 'target'] that can be indexed into the nodes DF and
column 'style' that can be indexed into the styles DF
:param styles: pd.DataFrame with columns for different cmaps ('cmap_from_white', etc),
color levels ('light', 'dark', etc). By default: plot.styles_df
:param kwargs_nodes: default kwargs to nodes plotting
:param kwargs_edges: default kwargs to edges plotting
:param transform: the transform to apply to the graph. Useful when drawing an inset.
"""
assert np.all(edges['source'] != edges['target']), 'self edges'
assert np.all([np.issubdtype(nodes[c].dtype, np.number) for c in ['x', 'y']])
if styles is None:
styles = plot.styles_df.copy()
self.styles = styles
self.nodes = nodes
self.edges = edges
self.transform = transform
self.default_kwargs_nodes = dict(
cmap='cmap',
marker='marker_time',
linewidth=.5,
facecolor='light',
edgecolor='darker',
)
self.default_kwargs_nodes.update(kwargs_nodes or {})
self.default_kwargs_edges = dict(
cmap='cmap',
facecolor='main',
edgecolor='main',
)
self.default_kwargs_edges.update(kwargs_edges or {})
edge_len = self.get_edge_lengths()
too_short = np.count_nonzero(np.isclose(edge_len, 0))
if too_short:
logging.warning(f'{too_short}/{len(edge_len)} edges of zero length')
# pandas complains when editing categories which is inconvenient
if self.nodes['style'].dtype.name == 'category':
self.nodes['style'] = self.nodes['style'].astype(str)
if self.edges['style'].dtype.name == 'category':
self.edges['style'] = self.edges['style'].astype(str)
def copy(self):
return Graph(
nodes=self.nodes.copy(),
edges=self.edges.copy(),
styles=self.styles.copy(),
transform=None if self.transform is None else self.transform.copy(),
kwargs_nodes=self.default_kwargs_nodes.copy(),
kwargs_edges=self.default_kwargs_edges.copy(),
)
def get_edge_lengths(self):
xy0 = self.nodes.loc[self.edges['source'], ['x', 'y']].values
xy1 = self.nodes.loc[self.edges['target'], ['x', 'y']].values
edge_len = np.sqrt(np.sum(np.square(xy0 - xy1), axis=1))
return pd.Series(edge_len, index=self.edges.index)
def _get_arrows(self, selection=None):
if selection is None:
selection = self.edges
if isinstance(selection, (np.ndarray, pd.Index)):
selection = self.edges.loc[selection]
arrows = [selection]
for end in ['source', 'target']:
pos = self.nodes[['x', 'y']].reindex(selection[end])
pos.index = selection.index
pos.columns = [end + '_' + c for c in pos.columns]
arrows.append(pos)
arrows = pd.concat(arrows, axis=1)
return arrows
def _lookup_style_kwargs(self, style, kwargs):
kwargs = kwargs.copy()
if 'style' in kwargs:
specific = kwargs.pop('style')
if style in specific:
kwargs.update(specific[style])
styled_kwargs = kwargs.copy()
for k, v in kwargs.items():
if isinstance(v, str) and v in self.styles.columns:
styled_kwargs[k] = self.styles.loc[style, v]
if self.transform is not None:
styled_kwargs['transform'] = self.transform
return styled_kwargs
def plot_nodes_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the nodes with a flat color
:param ax:
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return:
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
ax.scatter(
nodes.x,
nodes.y,
**style_kwargs,
)
def plot_nodes_cmap(self, ax, c=None, selection=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.nodes,
if none indicated, we expect a column 'c' in self.nodes
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
if c is None:
c = 'c'
if isinstance(c, str):
c = self.nodes[c]
if isinstance(c, np.ndarray):
c = pd.Series(c, index=self.nodes.index)
all_sm = {}
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
all_sm[style] = ax.scatter(
nodes.x,
nodes.y,
c=c.loc[nodes.index],
**style_kwargs,
)
return all_sm
def plot_nodes_labels(self, ax, nodes=None, va='center', ha='center', fmt='{index}', fontsize=6, **kwargs):
"""
plot a descriptive text for each node.
        By default, the index is shown; modify fmt to use something else (e.g. fmt='{index}: {style}')
"""
# TODO allow the style column in the fmt to color by dark of the "label" column.
if nodes is None:
nodes = self.nodes
else:
nodes = self.nodes.loc[nodes]
for idx, row in nodes.iterrows():
ax.text(row['x'], row['y'], fmt.format(index=idx, **row), va=va, ha=ha, fontsize=fontsize, **kwargs)
def plot_edges_cmap(self, ax, c=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.edges,
if none indicated, we expect a column 'c' in self.edges
:param kwargs: params to plot_arrows_cmap
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
if c is None:
c = self.edges['c']
all_sm = {}
for style, arrows in self._get_arrows().groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
if 'edgecolor' in style_kwargs:
style_kwargs.pop('edgecolor')
all_sm[style] = plot_arrows_cmap(
ax, arrows, c,
**style_kwargs
)
return all_sm
def plot_edges_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the edges with a flat color
:param ax:
:param selection:
:param kwargs:
:return:
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
for style, arrows in self._get_arrows(selection=selection).groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
plot_arrows_solid(
ax, arrows,
**style_kwargs
)
@classmethod
def from_conns(cls, conns, cells, node_style='ei_type', edge_style='con_type'):
"""plot the connections in XY space"""
all_gids = np.unique(conns[['source_gid', 'target_gid']].values.flatten())
nodes = cells.loc[all_gids, ['x', 'y']].copy()
nodes['style'] = cells.loc[nodes.index, node_style]
edges = conns[['source_gid', 'target_gid']].copy()
edges.columns = ['source', 'target']
edges['style'] = conns.loc[edges.index, edge_style]
return cls(nodes, edges)
@classmethod
def from_conn_jumps(
cls, selected_jumps, detailed_spikes, node_keys, edge_style,
**kwargs):
"""plot spike jumps"""
assert 'x' in node_keys and 'y' in node_keys and 'style' in node_keys
nodes = {}
for k, v in node_keys.items():
if isinstance(v, str):
v = detailed_spikes[v]
else:
assert isinstance(v, (tuple, list, pd.Series, np.ndarray))
nodes[k] = v
nodes = pd.DataFrame(nodes)
edges = selected_jumps[['source_spike', 'target_spike']].copy()
edges.columns = ['source', 'target']
edges['style'] = selected_jumps.loc[edges.index, edge_style]
return cls(nodes, edges, **kwargs)
def get_floating_nodes(self) -> pd.Index:
"""
:return: the index of nodes with no connections in or out
"""
return self.nodes.index[
~self.nodes.index.isin(self.edges['source']) &
~self.nodes.index.isin(self.edges['target'])
]
def get_linked_nodes(self) -> pd.Index:
"""
:return: the index of nodes with at least a connection in or out
"""
return self.nodes.index[~self.nodes.index.isin(self.get_floating_nodes())]
def drop_nodes(self, drop_gids: pd.Index):
"""
remove the given nodes from the graph. This will also remove edges to/from those nodes
:param drop_gids: either a list of node ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.nodes.index, fill_value=False)
assert len(drop_gids) == len(self.nodes)
drop_gids = self.nodes.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.nodes.index.difference(drop_gids)
self.nodes = self.nodes.loc[remaining_gids].copy()
bad_edges = (
self.edges['source'].isin(drop_gids) |
self.edges['target'].isin(drop_gids)
)
self.edges = self.edges.loc[~bad_edges].copy()
def drop_edges(self, drop_gids: pd.Index):
"""
remove the given edges from the graph
example:
graph.drop_edges(graph.edges['weight'] < .75 * 70)
:param drop_gids: either a list of edge ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.edges.index, fill_value=False)
assert len(drop_gids) == len(self.edges)
drop_gids = self.edges.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.edges.index.difference(drop_gids)
self.edges = self.edges.loc[remaining_gids].copy()
def add_edges(self, new_edges: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_edges before adding them.
For example, to set a style.
"""
new_edges = new_edges.copy()
for c, v in overwrite_cols.items():
new_edges[c] = v
missing_cols = self.edges.columns.difference(new_edges.columns)
if len(missing_cols) > 0:
logging.error(f'Missing columns: {list(missing_cols)}. Got: {list(new_edges.columns)}')
return
repeated = self.edges.index.intersection(new_edges.index)
if len(repeated):
logging.warning(f'Repeated edges will be ignored: {repeated}')
new_edges = new_edges.drop(repeated)
valid = (
new_edges['source'].isin(self.nodes.index) &
new_edges['target'].isin(self.nodes.index)
)
if np.any(~valid):
logging.warning(f'{np.count_nonzero(~valid):,g} edges without source or target will be ignored')
new_edges = new_edges[valid]
all_edges = pd.concat([self.edges, new_edges], axis=0, sort=False)
assert all_edges.index.is_unique
self.edges = all_edges
def add_nodes(self, new_nodes: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_nodes before adding them.
For example, to set a style.
"""
new_nodes = new_nodes.copy()
for c, v in overwrite_cols.items():
new_nodes[c] = v
missing_cols = self.nodes.columns.difference(new_nodes.columns)
if len(missing_cols) > 0:
logging.warning(f'Missing columns: {list(missing_cols)}. Got: {list(new_nodes.columns)}')
repeated = self.nodes.index.intersection(new_nodes.index)
if len(repeated):
logging.warning(f'Repeated nodes will be ignored: {repeated}')
new_nodes = new_nodes.drop(repeated)
all_nodes = pd.concat([self.nodes, new_nodes], axis=0, sort=False)
assert all_nodes.index.is_unique
self.nodes = all_nodes
def add_graph(self, other):
"""
Add another graph to this one.
Inplace.
"""
self.add_nodes(other.nodes)
self.add_edges(other.edges)
def drop_edges_orphan(self):
"""remove edges without a known source or target"""
mask_edges = (
self.edges['source'].isin(self.nodes.index) &
self.edges['target'].isin(self.nodes.index)
)
self.edges = self.edges[mask_edges].copy()
def layout_spring(self, edges_idx=None, iterations=100, source_gid=None, **kwargs):
"""
modify inplace the XY positions of the graph using a spring force algorithm
if source_gid is provided, it will be fixed at coordinate (0, 0)
        initial positions are taken from the current XY.
"""
fixed = kwargs.pop('fixed', None)
if source_gid is not None:
if fixed is None:
fixed = {}
fixed[source_gid] = (0, 0)
from networkx import spring_layout
pos = spring_layout(
self._get_as_networkx_digraph(edges_idx),
pos={i: (x, y) for i, x, y in self.nodes[['x', 'y']].itertuples()},
fixed=fixed,
iterations=iterations,
**kwargs,
)
self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
def layout_graphviz(self, edges_idx=None, **kwargs):
"""
modify inplace the XY positions of the graph using a one of the graphviz algorithms
see https://stackoverflow.com/questions/21978487/improving-python-networkx-graph-layout
"""
from networkx.drawing.nx_agraph import graphviz_layout
pos = graphviz_layout(
self._get_as_networkx_digraph(edges_idx),
**kwargs)
self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
def layout_raster_graphviz(self, all_spikes):
"""
modify inplace the Y positions of the graph (preserving X)
using the 'dot' algorithm (hierarchical)
"""
oldx = self.nodes['x'].copy()
self.layout_graphviz(prog='dot')
self.layout_transpose()
self.layout_reflect('x')
# restore x as time
self.nodes['x'] = oldx
# force y to be different and unique per gid
        self.nodes['y'] = self.nodes['y'].astype(float)
gids = all_spikes.loc[self.nodes.index, 'gid'].values
yloc = self.nodes['y'].groupby(gids).median().rank(method='first').reindex(gids)
yloc.index = self.nodes.index
self.nodes['y'] = yloc
assert np.all([np.issubdtype(self.nodes[c].dtype, np.number) for c in ['x', 'y']])
def layout_best_fit(self, around, orientation='vertical'):
"""
Place nodes using graphviz.
For 'floating' (disconnected) nodes, force them at the bottom of the plot.
Rotate plot to best use the orientation
:return:
"""
floating_gids = self.get_floating_nodes()
self.layout_graphviz()
center = self._layout_around(around)
        # make sure floating gids don't interfere when we are rotating our graph
# their position will get set afterwards
self.nodes.loc[floating_gids, 'x'] = center[0]
self.nodes.loc[floating_gids, 'y'] = center[1]
self.layout_rotate_to_match(around=center, orientation=orientation)
linked_gids = self.get_linked_nodes()
bbox = (
np.minimum(self.nodes.loc[linked_gids, ['x', 'y']].min(), -10),
np.maximum(self.nodes.loc[linked_gids, ['x', 'y']].max(), +10),
)
x = np.linspace(bbox[0]['x'], bbox[1]['x'], len(floating_gids) + 2)[1:-1]
self.nodes.loc[floating_gids, 'x'] = x
y = bbox[0]['y'] - (bbox[1]['y'] - bbox[0]['y']) * .2
self.nodes.loc[floating_gids, 'y'] = y
def _layout_around(self, around):
"""
translate the "around" param of other functions
:param around:
None: the center of mass of the graph
tuple, list or array: the exact 2d coordinates
anything else: the ID of the node we want to center around
:return: array of 2 elements containing xy position
"""
xy = self.nodes[['x', 'y']].values
if around is None:
around = np.mean(xy, axis=0)
elif isinstance(around, (list, tuple, np.ndarray)):
around = np.array(around)
else:
            around = self.nodes.loc[around, ['x', 'y']].values.astype(float)
assert np.issubdtype(around.dtype, np.number)
return around
def sort_edges(self, by, ascending=True):
"""sort the edges by the given series. inplace"""
if isinstance(by, str):
by = self.edges[by]
if not isinstance(by, pd.Series):
            by = pd.Series(np.asarray(by), index=self.edges.index)
assert isinstance(by, pd.Series)
by = by.reindex(self.edges.index)
by = by.sort_values(ascending=ascending)
self.edges = self.edges.loc[by.index]
def layout_get_dists(self):
"""
get the distances for every node with respect to (0, 0)
:return:
"""
xy = self.nodes[['x', 'y']].values.T
dists = np.sqrt(np.sum(np.square(xy), axis=0))
        return pd.Series(dists, index=self.nodes.index)
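# A small usage sketch for the Graph class, assuming a hand-made styles table whose
# columns match the defaults referenced in __init__ ('cmap', 'marker_time', 'light',
# 'darker', 'main'); all values below are illustrative:
#
#     import matplotlib.pyplot as plt
#     import pandas as pd
#
#     nodes = pd.DataFrame({'x': [0., 1., 2.], 'y': [0., 1., 0.],
#                           'style': ['exc', 'exc', 'inh']})
#     edges = pd.DataFrame({'source': [0, 1], 'target': [1, 2],
#                           'style': ['exc', 'inh']})
#     styles = pd.DataFrame(
#         {'cmap': ['Blues', 'Reds'], 'marker_time': ['o', 's'],
#          'light': ['#a6cee3', '#fb9a99'], 'darker': ['#1f78b4', '#e31a1c'],
#          'main': ['#1f78b4', '#e31a1c']},
#         index=['exc', 'inh'])
#     g = Graph(nodes, edges, styles=styles)
#     fig, ax = plt.subplots()
#     g.plot_edges_solid(ax)
#     g.plot_nodes_solid(ax)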
##########################################
# Share issuance as factor
# December 2018
# <NAME>
##########################################
import pandas as pd
import numpy as np
import os
from pandas.tseries.offsets import *
# Note that ccm, comp and crsp_m are WRDS datasets. However, the code is useful for
# other datasets as long as they are panel datasets structured like those from WRDS.
# There are some methodological idiosyncrasies of the US dataset, following Fama-French (1993),
# but once understood, the adaptation to other countries' datasets is feasible.
###################
# CRSP Block #
###################
## permco is a unique permanent identifier assigned by CRSP to all companies with issues on a CRSP file
## permno identifies a firm's security through all its history, and companies may have several stocks at one time
## shrcd is a two-digit code describing the type of shares traded. The first digit describes the type of security traded.
## exchcd is a code indicating the exchange on which a security is listed
## change variable format to int
crsp_m[['permco','permno','shrcd','exchcd']]=crsp_m[['permco','permno',
'shrcd','exchcd']].astype(int)
## Line up date to be end of month day, no adjustment on time, but on pattern
crsp_m['date']=pd.to_datetime(crsp_m['date'])
crsp_m['jdate']=crsp_m['date']+MonthEnd(0)
crsp_m = crsp_m[(crsp_m['date'].dt.year > 1993)] # This speeds up the computation,
# but note that it restricts the sample to post-1993 data.
## adjusting for delisting return
dlret.permno=dlret.permno.astype(int)
dlret['dlstdt']=pd.to_datetime(dlret['dlstdt'])
dlret['jdate']=dlret['dlstdt']+MonthEnd(0) ## align the delisting date to the end of the month
## merge the crsp dataset with the dlret on the left indexes
crsp = pd.merge(crsp_m, dlret, how='left',on=['permno','jdate'])
crsp['dlret']=crsp['dlret'].fillna(0)
crsp['ret']=crsp['ret'].fillna(0)
crsp['retadj']=(1+crsp['ret'])*(1+crsp['dlret'])-1 ## adjusting for delisting return
crsp['me']=crsp['prc'].abs()*crsp['shrout'] # calculate market equity
crsp=crsp.drop(['dlret','dlstdt','prc','shrout'], axis=1)
## axis = 0 is the row, and is default, and axis = 1 is the column to drop
crsp=crsp.sort_values(by=['jdate','permco','me'])
## sorting columns ascending = TRUE as default, by the variables: jdate is the adj date by the EoP and
## permco is the CRSP number for stocks, and me is the market equity.
### Aggregate Market Cap ###
## sum of me across different permno belonging to same permco a given date
crsp_summe = crsp.groupby(['jdate','permco'])['me'].sum().reset_index()
## reset the index to the prior numbers as default in pandas,
## and with the changed index still there drop = False as default
# largest mktcap within a permco/date
crsp_maxme = crsp.groupby(['jdate','permco'])['me'].max().reset_index()
# join by jdate/maxme to find the permno
crsp1=pd.merge(crsp, crsp_maxme, how='inner', on=['jdate','permco','me'])
## join : {‘inner’, ‘outer’}, default ‘outer’. Outer for union and inner for intersection.
## drop me column and replace with the sum me
crsp1=crsp1.drop(['me'], axis=1)
## join with sum of me to get the correct market cap info
crsp2=pd.merge(crsp1, crsp_summe, how='inner', on=['jdate','permco'])
## sort by permno and date and also drop duplicates
crsp2=crsp2.sort_values(by=['permno','jdate']).drop_duplicates()
## keep December market cap
crsp2['year']=crsp2['jdate'].dt.year
crsp2['month']=crsp2['jdate'].dt.month
decme=crsp2[crsp2['month']==12]
decme=decme[['permno','date','jdate','me','year']].rename(columns={'me':'dec_me'})
### July to June dates
crsp2['ffdate']=crsp2['jdate']+MonthEnd(-6) ## MonthEnd(-6) is to go six months in the EoM backwards
crsp2['ffyear']=crsp2['ffdate'].dt.year
crsp2['ffmonth']=crsp2['ffdate'].dt.month
crsp2['1+retx']=1+crsp2['retx'] ## retx is the holding period return w/o dividends for a month
crsp2=crsp2.sort_values(by=['permno','date'])
# cumret by stock ## pick the before year
crsp2['cumretx']=crsp2.groupby(['permno','ffyear'])['1+retx'].cumprod() ## compute the cumulative return
## of a year measured by ffyear, the data date backwards six months.
# lag cumret
crsp2['lcumretx']=crsp2.groupby(['permno'])['cumretx'].shift(1)
## shift one row (as default, axis = 0), this leads to the next period.
# lag market cap by one month
crsp2['lme']=crsp2.groupby(['permno'])['me'].shift(1)
## if first permno then use me/(1+retx) to replace the missing value
crsp2['count']=crsp2.groupby(['permno']).cumcount()
crsp2['lme']=np.where(crsp2['count']==0, crsp2['me']/crsp2['1+retx'], crsp2['lme'])
## for a permno's first observation, back out the lagged me as me/(1+retx); otherwise keep the lagged me
# baseline me ## pick the first month of this backwards year, and say it is the base.
mebase=crsp2[crsp2['ffmonth']==1][['permno','ffyear', 'lme']].rename(columns={'lme':'mebase'})
## merge result back together
crsp3=pd.merge(crsp2, mebase, how='left', on=['permno','ffyear'])
crsp3['wt']=np.where(crsp3['ffmonth']==1, crsp3['lme'], crsp3['mebase']*crsp3['lcumretx'])
## wt is the portfolio weight: the lagged market equity carried forward within the
## Fama-French year using cumulative ex-dividend returns (retx), so that distributed
## dividends do not inflate the weight. The weighting criterion is thus the lagged me
## (up to six months back), not the current me.
#######################
# CCM Block #
#######################
## Compustat and CRSP merged data
ccm['linkdt']=pd.to_datetime(ccm['linkdt']) ## linkdt is a calendar date marking the first effective
## date of the current link. If the link was valid before CRSP's earliest record, LINKDT is set to be
## SAS missing code ".B".
ccm['linkenddt']=pd.to_datetime(ccm['linkenddt']) ## LINKENDDT is the last effective date of the link record.
## It uses the SAS missing code ".E" if a link is still valid.
# if linkenddt is missing then set to today date
ccm['linkenddt']=ccm['linkenddt'].fillna(pd.to_datetime('today'))
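# A typical next step in the standard CRSP/Compustat merge is to keep only observations
# that fall inside the valid link window. Sketched here with the usual replication
# column names ('gvkey', 'datadate'); the actual continuation of this script may differ:
#
#     ccm1 = pd.merge(comp, ccm, how='left', on=['gvkey'])
#     ccm1['jdate'] = ccm1['datadate'] + YearEnd(0) + MonthEnd(6)
#     ccm2 = ccm1[(ccm1['jdate'] >= ccm1['linkdt']) & (ccm1['jdate'] <= ccm1['linkenddt'])]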
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pandas as pd
import pytest
from causalnex.network import BayesianNetwork
from causalnex.structure import StructureModel
from causalnex.structure.notears import from_pandas
from causalnex.utils.network_utils import get_markov_blanket
class TestFitNodeStates:
"""Test behaviour of fit node states method"""
@pytest.mark.parametrize(
"weighted_edges, data",
[
([("a", "b", 1)], pd.DataFrame([[1, 1]], columns=["a", "b"])),
(
[("a", "b", 1)],
pd.DataFrame([[1, 1, 1, 1]], columns=["a", "b", "c", "d"]),
),
# c and d are isolated nodes in the data
],
)
def test_all_nodes_included(self, weighted_edges, data):
"""No errors if all the nodes can be found in the columns of training data"""
cg = StructureModel()
cg.add_weighted_edges_from(weighted_edges)
bn = BayesianNetwork(cg).fit_node_states(data)
assert all(node in data.columns for node in bn.node_states.keys())
def test_all_states_included(self):
"""All states in a node should be included"""
cg = StructureModel()
cg.add_weighted_edges_from([("a", "b", 1)])
bn = BayesianNetwork(cg).fit_node_states(
pd.DataFrame([[i, i] for i in range(10)], columns=["a", "b"])
)
assert all(v in bn.node_states["a"] for v in range(10))
def test_fit_with_null_states_raises_error(self):
"""An error should be raised if fit is called with null data"""
cg = StructureModel()
cg.add_weighted_edges_from([("a", "b", 1)])
with pytest.raises(ValueError, match="node '.*' contains None state"):
BayesianNetwork(cg).fit_node_states(
pd.DataFrame([[None, 1]], columns=["a", "b"])
)
def test_fit_with_missing_feature_in_data(self):
"""An error should be raised if fit is called with missing feature in data"""
cg = StructureModel()
cg.add_weighted_edges_from([("a", "e", 1)])
with pytest.raises(
KeyError,
match="The data does not cover all the features found in the Bayesian Network. "
"Please check the following features: {'e'}",
):
BayesianNetwork(cg).fit_node_states(
pd.DataFrame([[1, 1, 1, 1]], columns=["a", "b", "c", "d"])
)
class TestFitCPDSErrors:
"""Test errors for fit CPDs method"""
def test_invalid_method(self, bn, train_data_discrete):
"""a value error should be raised in an invalid method is provided"""
with pytest.raises(ValueError, match=r"unrecognised method.*"):
bn.fit_cpds(train_data_discrete, method="INVALID")
def test_invalid_prior(self, bn, train_data_discrete):
"""a value error should be raised in an invalid prior is provided"""
with pytest.raises(ValueError, match=r"unrecognised bayes_prior.*"):
bn.fit_cpds(
train_data_discrete, method="BayesianEstimator", bayes_prior="INVALID"
)
class TestFitCPDsMaximumLikelihoodEstimator:
"""Test behaviour of fit_cpds using MLE"""
def test_cause_only_node(self, bn, train_data_discrete, train_data_discrete_cpds):
"""Test that probabilities are fit correctly to nodes which are not caused by other nodes"""
bn.fit_cpds(train_data_discrete)
cpds = bn.cpds
assert (
np.mean(
np.abs(
cpds["d"].values.reshape(2)
- train_data_discrete_cpds["d"].reshape(2)
)
)
< 1e-7
)
assert (
np.mean(
np.abs(
cpds["e"].values.reshape(2)
- train_data_discrete_cpds["e"].reshape(2)
)
)
< 1e-7
)
def test_dependent_node(self, bn, train_data_discrete, train_data_discrete_cpds):
"""Test that probabilities are fit correctly to nodes that are caused by other nodes"""
bn.fit_cpds(train_data_discrete)
cpds = bn.cpds
assert (
np.mean(
np.abs(
cpds["a"].values.reshape(24)
- train_data_discrete_cpds["a"].reshape(24)
)
)
< 1e-7
)
assert (
np.mean(
np.abs(
cpds["b"].values.reshape(12)
- train_data_discrete_cpds["b"].reshape(12)
)
)
< 1e-7
)
assert (
np.mean(
np.abs(
cpds["c"].values.reshape(60)
- train_data_discrete_cpds["c"].reshape(60)
)
)
< 1e-7
)
def test_fit_missing_states(self):
"""test issues/15: should be possible to fit with missing states"""
sm = StructureModel([("a", "b"), ("c", "b")])
bn = BayesianNetwork(sm)
train = pd.DataFrame(
data=[[0, 0, 1], [1, 0, 1], [1, 1, 1]], columns=["a", "b", "c"]
)
test = pd.DataFrame(
data=[[0, 0, 1], [1, 0, 1], [1, 1, 2]], columns=["a", "b", "c"]
)
        data = pd.concat([train, test])
import numpy as np
import pandas as pd
from featuretools.utils.gen_utils import find_descendents
class Variable(object):
"""Represent a variable in an entity
A Variable is analogous to a column in table in a relational database
Args:
id (str) : Id of variable. Must match underlying data in Entity
it belongs to.
entity (:class:`.Entity`) : Entity this variable belongs to.
name (str, optional) : Variable name. Defaults to id.
See Also:
:class:`.Entity`, :class:`.Relationship`, :class:`.BaseEntitySet`
"""
type_string = None
_default_pandas_dtype = object
def __init__(self, id, entity, name=None):
assert isinstance(id, str), "Variable id must be a string"
self.id = id
self._name = name
self.entity_id = entity.id
assert entity.entityset is not None, "Entity must contain reference to EntitySet"
self.entity = entity
self._interesting_values = pd.Series()
@property
def entityset(self):
return self.entity.entityset
def __eq__(self, other, deep=False):
shallow_eq = isinstance(other, self.__class__) and \
self.id == other.id and \
self.entity_id == other.entity_id
if not deep:
return shallow_eq
else:
return shallow_eq and set(self.interesting_values.values) == set(other.interesting_values.values)
def __hash__(self):
return hash((self.id, self.entity_id))
def __repr__(self):
return u"<Variable: {} (dtype = {})>".format(self.name, self.type_string)
@classmethod
def create_from(cls, variable):
"""Create new variable this type from existing
Args:
variable (Variable) : Existing variable to create from.
Returns:
:class:`.Variable` : new variable
"""
v = cls(id=variable.id, name=variable.name, entity=variable.entity)
return v
@property
def name(self):
return self._name if self._name is not None else self.id
@property
def dtype(self):
return self.type_string \
if self.type_string is not None else "generic_type"
@name.setter
def name(self, name):
self._name = name
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, interesting_values):
self._interesting_values = pd.Series(interesting_values)
@property
def series(self):
return self.entity.df[self.id]
def to_data_description(self):
return {
'id': self.id,
'type': {
'value': self.type_string,
},
'properties': {
'name': self.name,
'entity': self.entity.id,
'interesting_values': self._interesting_values.to_json()
},
}
class Unknown(Variable):
pass
class Discrete(Variable):
"""Superclass representing variables that take on discrete values"""
type_string = "discrete"
def __init__(self, id, entity, name=None):
super(Discrete, self).__init__(id, entity, name)
self._interesting_values = pd.Series()
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, values):
seen = set()
seen_add = seen.add
self._interesting_values = pd.Series([v for v in values if not
(v in seen or seen_add(v))])
class Boolean(Variable):
"""Represents variables that take on one of two values
Args:
true_values (list) : List of valued true values. Defaults to [1, True, "true", "True", "yes", "t", "T"]
false_values (list): List of valued false values. Defaults to [0, False, "false", "False", "no", "f", "F"]
"""
type_string = "boolean"
_default_pandas_dtype = bool
def __init__(self,
id,
entity,
name=None,
true_values=None,
false_values=None):
default = [1, True, "true", "True", "yes", "t", "T"]
self.true_values = true_values or default
default = [0, False, "false", "False", "no", "f", "F"]
self.false_values = false_values or default
super(Boolean, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Boolean, self).to_data_description()
description['type'].update({
'true_values': self.true_values,
'false_values': self.false_values
})
return description
class Categorical(Discrete):
"""Represents variables that can take an unordered discrete values
Args:
categories (list) : List of categories. If left blank, inferred from data.
"""
type_string = "categorical"
def __init__(self, id, entity, name=None, categories=None):
        self.categories = categories or []
super(Categorical, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Categorical, self).to_data_description()
description['type'].update({'categories': self.categories})
return description
class Id(Categorical):
"""Represents variables that identify another entity"""
type_string = "id"
_default_pandas_dtype = int
class Ordinal(Discrete):
"""Represents variables that take on an ordered discrete value"""
type_string = "ordinal"
_default_pandas_dtype = int
class Numeric(Variable):
"""Represents variables that contain numeric values
Args:
range (list, optional) : List of start and end. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
Attributes:
max (float)
min (float)
std (float)
mean (float)
"""
type_string = "numeric"
_default_pandas_dtype = float
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
        self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Numeric, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Numeric, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Index(Variable):
"""Represents variables that uniquely identify an instance of an entity
Attributes:
count (int)
"""
type_string = "index"
_default_pandas_dtype = int
class Datetime(Variable):
"""Represents variables that are points in time
Args:
format (str): Python datetime format string documented `here <http://strftime.org/>`_.
"""
type_string = "datetime"
_default_pandas_dtype = np.datetime64
def __init__(self, id, entity, name=None, format=None):
self.format = format
super(Datetime, self).__init__(id, entity, name=name)
def __repr__(self):
return u"<Variable: {} (dtype: {}, format: {})>".format(self.name, self.type_string, self.format)
def to_data_description(self):
description = super(Datetime, self).to_data_description()
description['type'].update({'format': self.format})
return description
class TimeIndex(Variable):
"""Represents time index of entity"""
type_string = "time_index"
_default_pandas_dtype = np.datetime64
class NumericTimeIndex(TimeIndex, Numeric):
"""Represents time index of entity that is numeric"""
type_string = "numeric_time_index"
_default_pandas_dtype = float
class DatetimeTimeIndex(TimeIndex, Datetime):
"""Represents time index of entity that is a datetime"""
type_string = "datetime_time_index"
_default_pandas_dtype = np.datetime64
class Timedelta(Variable):
"""Represents variables that are timedeltas
Args:
range (list, optional) : List of start and end of allowed range in seconds. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
"""
type_string = "timedelta"
_default_pandas_dtype = np.timedelta64
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Timedelta, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Timedelta, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Text(Variable):
"""Represents variables that are arbitary strings"""
type_string = "text"
_default_pandas_dtype = str
class PandasTypes(object):
_all = 'all'
_categorical = 'category'
_pandas_datetimes = ['datetime64[ns]', 'datetime64[ns, tz]']
_pandas_timedeltas = ['Timedelta']
_pandas_numerics = ['int16', 'int32', 'int64',
'float16', 'float32', 'float64']
class LatLong(Variable):
"""Represents an ordered pair (Latitude, Longitude)
To make a latlong in a dataframe do
data['latlong'] = data[['latitude', 'longitude']].apply(tuple, axis=1)
"""
type_string = "latlong"
class ZIPCode(Categorical):
"""Represents a postal address in the United States.
Consists of a series of digits which are casts as
string. Five digit and 9 digit zipcodes are supported.
"""
type_string = "zipcode"
_default_pandas_dtype = str
class IPAddress(Variable):
"""Represents a computer network address. Represented
in dotted-decimal notation. IPv4 and IPv6 are supported.
"""
type_string = "ip"
_default_pandas_dtype = str
class FullName(Variable):
"""Represents a person's full name. May consist of a
first name, last name, and a title.
"""
type_string = "full_name"
_default_pandas_dtype = str
class EmailAddress(Variable):
"""Represents an email box to which email message are sent.
Consists of a local-part, an @ symbol, and a domain.
"""
type_string = "email"
_default_pandas_dtype = str
class URL(Variable):
"""Represents a valid web url (with or without http/www)"""
type_string = "url"
_default_pandas_dtype = str
class PhoneNumber(Variable):
"""Represents any valid phone number.
Can be with/without parenthesis.
Can be with/without area/country codes.
"""
type_string = "phone_number"
_default_pandas_dtype = str
class DateOfBirth(Datetime):
"""Represents a date of birth as a datetime"""
type_string = "date_of_birth"
_default_pandas_dtype = np.datetime64
class CountryCode(Categorical):
"""Represents an ISO-3166 standard country code.
ISO 3166-1 (countries) are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America = US
"""
type_string = "country_code"
_default_pandas_dtype = str
class SubRegionCode(Categorical):
"""Represents an ISO-3166 standard sub-region code.
ISO 3166-2 codes (sub-regions are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America, Arizona = US-AZ
"""
type_string = "subregion_code"
_default_pandas_dtype = str
class FilePath(Variable):
"""Represents a valid filepath, absolute or relative"""
type_string = "filepath"
_default_pandas_dtype = str
def find_variable_types():
return {str(vtype.type_string): vtype for vtype in find_descendents(
Variable) if hasattr(vtype, 'type_string')}
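# A short usage sketch for find_variable_types (names assumed from this module):
#
#     vtypes = find_variable_types()
#     assert vtypes['categorical'] is Categorical
#     assert vtypes['numeric'] is Numeric
#
# Note that subclasses that do not define their own type_string (it stays None) all
# collapse onto the single key 'None' because of the str() conversion above.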
DEFAULT_DTYPE_VALUES = {
np.datetime64: pd.Timestamp.now(),
int: 0,
float: 0.1,
    np.timedelta64: pd.Timedelta('1d')
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
frame = pd.DataFrame({'a': [1]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(frame, 1)
assert info.value.message == 'Inputs must both be pandas DataFrames.'
def test_frames_equal_mismatched_columns():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'b': [2]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == "Expected column 'a' not found."
def test_frames_equal_mismatched_rows():
expected = pd.DataFrame({'a': [1]}, index=[0])
actual = pd.DataFrame({'a': [1]}, index=[1])
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == "Expected row 0 not found."
def test_frames_equal_mismatched_items():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'a': [2]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == """
Items are not equal:
ACTUAL: 2
DESIRED: 1
Column: 'a'
Row: 0"""
def test_frames_equal():
frame = pd.DataFrame({'a': [1]})
testing.assert_frames_equal(frame, frame)
def test_frames_equal_close():
frame1 = pd.DataFrame({'a': [1]})
frame2 = pd.DataFrame({'a': [1.00000000000002]})
with pytest.raises(AssertionError):
testing.assert_frames_equal(frame1, frame2)
testing.assert_frames_equal(frame1, frame2, use_close=True)
def test_index_equal_order_agnostic():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1])
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_left():
left = pd.Index([1, 2, 3, 4])
right = pd.Index([3, 2, 1])
with pytest.raises(AssertionError):
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_right():
left = pd.Index([1, 2, 3])
right = | pd.Index([3, 2, 1, 4]) | pandas.Index |
import logging
from collections import defaultdict
from concurrent.futures import FIRST_EXCEPTION, wait
from itertools import product
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from hermes.typeo import typeo
from rich.progress import Progress
from bbhnet.analysis.analysis import integrate
from bbhnet.analysis.distributions import DiscreteDistribution
from bbhnet.analysis.normalizers import GaussianNormalizer
from bbhnet.io.h5 import write_timeseries
from bbhnet.io.timeslides import Segment, TimeSlide
from bbhnet.logging import configure_logging
from bbhnet.parallelize import AsyncExecutor, as_completed
event_times = [1186302519.8, 1186741861.5, 1187058327.1, 1187529256.5]
event_names = ["GW170809", "GW170814", "GW170818", "GW170823"]
events = {name: time for name, time in zip(event_names, event_times)}
def load_segment(segment: Segment):
"""
Quick utility function which just wraps a Segment's
`load` method so that we can execute it in a process
pool since methods aren't picklable.
"""
segment.load("out")
return segment
def get_write_dir(
write_dir: Path, norm: Optional[float], shift: Union[str, Segment]
) -> Path:
"""
Quick utility function for getting the name of the directory
to which to save the outputs from an analysis using a particular
time-shift/norm-seconds combination
"""
if isinstance(shift, Segment):
shift = shift.shift
write_dir = write_dir / f"norm-seconds.{norm}" / shift
write_dir.mkdir(parents=True, exist_ok=True)
return write_dir
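# Illustration of the layout produced above (values are assumptions):
#   get_write_dir(Path("output"), 4.0, "dt-0.5")
#   -> output/norm-seconds.4.0/dt-0.5   (created if it does not exist)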
def build_background(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
pbar: Progress,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
):
"""
For a sequence of background segments, compute a discrete
distribution of integrated neural network outputs using
the indicated integration window length for each of the
normalization window lengths specified. Iterates through
the background segments in order and tries to find as
many time-shifts available for each segment as possible
in the specified data directory, stopping iteration through
    segments once a maximum number of seconds of background have
been generated.
As a warning, there's a fair amount of asynchronous execution
going on in this function, and it may come off a bit complex.
Args:
thread_ex:
An `AsyncExecutor` that maintains a thread pool
for writing analyzed segments in parallel with
the analysis processes themselves.
process_ex:
An `AsyncExecutor` that maintains a process pool
for loading and integrating Segments of neural
network outputs.
pbar:
A `rich.progress.Progress` object for keeping
track of the progress of each of the various
subtasks.
background_segments:
The `Segment` objects to use for building a
background distribution. `data_dir` will be
searched for all time-shifts of each segment
for parallel analysis. Once `max_tb` seconds
worth of background have been generated, iteration
through this array will be terminated, so segments
should be ordered by some level of "importance",
since it's likely that segments near the back of the
array won't be analyzed for lower values of `max_tb`.
data_dir:
Directory containing timeslide root directories,
which will be mined for time-shifts of each `Segment`
in `background_segments`. If a time-shift doesn't exist
for a given `Segment`, the time-shift is ignored.
write_dir:
Root directory to which to write integrated NN outputs.
For each time-shift analyzed and normalization window
length specified in `norm_seconds`, results will be
written to a subdirectory
`write_dir / "norm-seconds.{norm}" / shift`, which
will be created if it does not exist.
max_tb:
The maximum number of seconds of background data
to analyze for each value of `norm_seconds` before
new segments to shift and analyze are no longer sought.
However, because we use _every_ time-shift for each
            segment we iterate through, it's possible that each
background distribution will utilize slightly more
than this value.
window_length:
The length of the integration window to use
for analysis in seconds.
norm_seconds:
An array of normalization window lengths to use
to standardize the integrated neural network outputs.
(i.e. the output timeseries is the integral over the
previous `window_length` seconds, normalized by the
mean and standard deviation of the previous `norm`
seconds before that, where `norm` is each value in
`norm_seconds`). A `norm` value of `None` in the
`norm_seconds` iterable indicates
no normalization, and if `norm_seconds` is left as
`None` this will be the only value used.
num_bins:
The number of bins to use to initialize the discrete
distribution used to characterize the background
distribution.
Returns:
A dictionary mapping each value in `norm_seconds` to
an associated `DiscreteDistribution` characterizing
its background distribution.
"""
write_dir.mkdir(exist_ok=True)
norm_seconds = norm_seconds or [norm_seconds]
# keep track of the min and max values of each normalization
# window's background and the corresponding filenames so
# that we can fit a discrete distribution to it after the fact
mins = defaultdict(lambda: float("inf"))
maxs = defaultdict(lambda: -float("inf"))
# keep track of all the files that we've written
# for each normalization window size so that we
# can iterate through them later and submit them
# for reloading once we have our distributions initialized
fname_futures = defaultdict(list)
# iterate through timeshifts of our background segments
# until we've generated enough background data.
background_segments = iter(background_segments)
main_task_id = pbar.add_task("[red]Building background", total=max_tb)
while not pbar.tasks[main_task_id].finished:
segment = next(background_segments)
# since we're assuming here that the background
# segments are being provided in reverse chronological
# order (with segments closest to the event segment first),
# exhaust all the time shifts we can of each segment before
# going to the previous one to keep data as fresh as possible
load_futures = {}
for shift in data_dir.iterdir():
try:
shifted = segment.make_shift(shift.name)
except ValueError:
# this segment doesn't have a shift
# at this value, so just move on
continue
# load all the timeslides up front in a separate thread
# TODO: O(1GB) memory means segment.length * N ~O(4M),
# so for ~O(10k) long segments this means this should
# be fine as long as N ~ O(100). Worth doing a check for?
future = process_ex.submit(load_segment, shifted)
load_futures[shift.name] = [future]
# create progress bar tasks for each one
# of the subprocesses involved for analyzing
# this set of timeslides
load_task_id = pbar.add_task(
f"[cyan]Loading {len(load_futures)} {segment.length}s timeslides",
total=len(load_futures),
)
analyze_task_id = pbar.add_task(
"[yelllow]Integrating timeslides",
total=len(load_futures) * len(norm_seconds),
)
write_task_id = pbar.add_task(
"[green]Writing integrated timeslides",
total=len(load_futures) * len(norm_seconds),
)
# now once each segment is loaded, submit a job
# to our process pool to integrate it using each
# one of the specified normalization periods
integration_futures = {}
sample_rate = None
for shift, seg in as_completed(load_futures):
# get the sample rate of the NN output timeseries
# dynamically from the first timeseries we load,
# since we'll need it to initialize our normalizers
if sample_rate is None:
t = seg._cache["t"]
sample_rate = 1 / (t[1] - t[0])
for norm in norm_seconds:
# build a normalizer for the given normalization window length
if norm is not None:
normalizer = GaussianNormalizer(norm * sample_rate)
else:
normalizer = None
# submit the integration job and have it update the
# corresponding progress bar task once it completes
future = process_ex.submit(
integrate,
seg,
kernel_length=1.0,
window_length=window_length,
normalizer=normalizer,
)
future.add_done_callback(
lambda f: pbar.update(analyze_task_id, advance=1)
)
integration_futures[(norm, shift)] = [future]
# advance the task keeping track of how many files
# we've loaded by one
pbar.update(load_task_id, advance=1)
# make sure we have the expected number of jobs submitted
if len(integration_futures) < (len(norm_seconds) * len(load_futures)):
raise ValueError(
"Expected {} integration jobs submitted, "
"but only found {}".format(
len(norm_seconds) * len(load_futures),
len(integration_futures),
)
)
# as the integration jobs come back, write their
# results using our thread pool and record the
# min and max values for our discrete distribution
segment_futures = []
for (norm, shift), (t, y, integrated) in as_completed(
integration_futures
):
# submit the writing job to our thread pool and
# use a callback to keep track of all the filenames
# for a given normalization window
shift_dir = get_write_dir(write_dir, norm, shift)
future = thread_ex.submit(
write_timeseries,
shift_dir,
t=t,
y=y,
integrated=integrated,
)
future.add_done_callback(
lambda f: pbar.update(write_task_id, advance=1)
)
fname_futures[norm].append(future)
segment_futures.append(future)
# keep track of the max and min values for each norm
mins[norm] = min(mins[norm], integrated.min())
maxs[norm] = max(maxs[norm], integrated.max())
# wait for all the writing to finish before we
# move on so that we don't overload our processes
wait(segment_futures, return_when=FIRST_EXCEPTION)
pbar.update(main_task_id, advance=len(load_futures) * segment.length)
# now that we've analyzed enough background data,
# we'll initialize background distributions using
# the min and max bounds we found during analysis
# and then load everything back in to bin them
# within these bounds
Tb = pbar.tasks[main_task_id].completed
logging.info(f"Accumulated {Tb}s of background matched filter outputs.")
# submit a bunch of jobs for loading these integrated
# segments back in for discretization
load_futures = defaultdict(list)
for norm, fname in as_completed(fname_futures):
future = process_ex.submit(load_segment, Segment(fname))
load_futures[norm].append(future)
# create a task for each one of the normalization windows
# tracking how far along the distribution fit is
fit_task_ids = {}
for norm in norm_seconds:
norm_name = f"{norm}s" if norm is not None else "empty"
task_id = pbar.add_task(
"[purple]Fitting background using {} normalization window".format(
norm_name
),
total=len(load_futures[norm]),
)
fit_task_ids[norm] = task_id
    # now discretize the analyzed segments as they're loaded back in
backgrounds = {}
for norm, segment in as_completed(load_futures):
try:
# if we already have a background distribution
# for this event, grab it and fit it with a
# "warm start" aka don't ditch the existing histogram
background = backgrounds[norm]
warm_start = True
except KeyError:
# otherwise create a new distribution
# and fit it from scratch
mn, mx = mins[norm], maxs[norm]
background = DiscreteDistribution("integrated", mn, mx, num_bins)
backgrounds[norm] = background
warm_start = False
# fit the distribution to the new data and then
# update the corresponding task tracker
background.fit(segment, warm_start=warm_start)
pbar.update(fit_task_ids[norm], advance=1)
return backgrounds
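# Hedged illustration of the integration/normalization semantics described in
# build_background's docstring: average the last `window_length` seconds of a
# timeseries, then standardize by the mean/std of the `norm` seconds that come
# immediately before each window. This is a simplified stand-in for
# bbhnet.analysis.integrate plus GaussianNormalizer, not a reimplementation.
def _example_integrate_and_normalize(y, sample_rate, window_length=1.0, norm=10.0):
    series = pd.Series(np.asarray(y, dtype=float))
    window = int(window_length * sample_rate)
    norm_size = int(norm * sample_rate)
    integrated = series.rolling(window).mean()
    # statistics of the `norm`-second stretch preceding each integration window
    mu = integrated.shift(window).rolling(norm_size).mean()
    sigma = integrated.shift(window).rolling(norm_size).std()
    return ((integrated - mu) / sigma).to_numpy()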
def check_if_needs_analyzing(
event_segment: Segment,
norm_seconds: Iterable[Optional[float]],
characterizations: pd.DataFrame,
) -> Iterable[Optional[float]]:
times = [t for t in event_times if t in event_segment]
names = [name for name in event_names if events[name] in times]
combos = set(product(names, norm_seconds))
remaining = combos - set(characterizations.index)
# only do analysis on those normalization
# values that we haven't already done
# (sorry, you'll still have to do it for all events,
    # but those are minuscule by comparison)
norm_seconds = list(set([j for i, j in remaining]))
return norm_seconds, names, times
def analyze_event(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
characterizations: pd.DataFrame,
timeseries: pd.DataFrame,
event_segment: Segment,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
results_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
force: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Use timeshifts of a set of previous segments to build a
background distribution with which to analyze a segment
    containing an event and characterizing the false alarm
rate of that event as a function of time from the event
trigger.
"""
# first check if we can skip this analysis altogether
# because we already have data on it and we're not
# forcing ourselves to re-analyze
norm_seconds = norm_seconds or [norm_seconds]
if not force:
norm_seconds, names, times = check_if_needs_analyzing(
event_segment, norm_seconds, characterizations
)
if len(norm_seconds) == 0:
logging.info(
f"Already analyzed events in segment {event_segment}, skipping"
)
return
with Progress() as pbar:
# TODO: exclude segments with events?
backgrounds = build_background(
thread_ex,
process_ex,
pbar,
background_segments=background_segments,
data_dir=data_dir,
write_dir=write_dir,
window_length=window_length,
norm_seconds=norm_seconds,
max_tb=max_tb,
num_bins=num_bins,
)
# now use the fit background to characterize the
# significance of BBHNet's detection around the event
for norm, background in backgrounds.items():
if norm is not None:
normalizer = GaussianNormalizer(norm)
else:
normalizer = None
logging.info(
"Characterizing events {} with normalization "
"window length {}".format(", ".join(names), norm)
)
t, y, integrated = integrate(
event_segment,
kernel_length=1,
window_length=window_length,
normalizer=normalizer,
)
fname = write_timeseries(
get_write_dir(write_dir, norm, event_segment),
t=t,
y=y,
integrated=integrated,
)
# create a segment and add the existing data to
# its cache so that we don't try to load it again
segment = Segment(fname)
segment._cache = {"t": t, "integrated": integrated}
fars, latencies = background.characterize_events(
segment, times, window_length=window_length, metric="far"
)
# for each one of the events in this segment,
# record the false alarm rate as a function of
# time and add it to our dataframe then checkpoint it.
# Then isolate the timeseries of both the NN outputs and
# the integrated values around the event and write those
# to another dataframe and checkpoint that as well
for far, latency, name, time in zip(fars, latencies, names, times):
logging.info(f"\t{name}:")
logging.info(f"\t\tFalse Alarm Rates: {list(far)}")
logging.info(f"\t\tLatencies: {list(latency)}")
df = pd.DataFrame(
dict(
event_name=[name] * len(far),
norm_seconds=[norm] * len(far),
far=far,
latency=latency,
)
).set_index(["event_name", "norm_seconds"])
characterizations = pd.concat([characterizations, df])
characterizations.to_csv(results_dir / "characterizations.csv")
# keep the one second before the trigger,
# during the event after the trigger, and
# after the event trigger has left the kernel
mask = (time - 1 < t) & (t < time + 2)
df = pd.DataFrame(
dict(
event_name=[name] * mask.sum(),
norm_seconds=[norm] * mask.sum(),
t=t[mask] - time,
y=y[mask],
integrated=integrated[mask],
)
).set_index(["event_name", "norm_seconds"])
timeseries = pd.concat([timeseries, df])
timeseries.to_csv(results_dir / "timeseries.csv")
# write an h5 file describing the background distribution
fname = "background_events.{}_norm.{}.hdf5".format(
",".join(names), norm
)
background.write(results_dir / fname)
return far, t, background
@typeo
def main(
data_dir: Path,
write_dir: Path,
results_dir: Path,
window_length: float = 1.0,
norm_seconds: Optional[List[float]] = None,
max_tb: Optional[float] = None,
num_bins: int = 10000,
force: bool = False,
log_file: Optional[str] = None,
verbose: bool = False,
):
"""Analyze known events in a directory of timeslides
Iterate through a directory of timeslides analyzing known
events for false alarm rates in units of yrs$^{-1}$ as
a function of the time after the event trigger times enters
the neural network's input kernel. For each event and normalization
period specified by `norm_seconds`, use time- shifted data from
segments _before_ the event's segment tobuild up a background
distribution of the output of matched filters of length `window_length`,
normalized by the mean and standard deviation of the previous
`norm_seconds` worth of data, until the effective time analyzed
is equal to `max_tb`.
The results of this analysis will be written to two csv files,
    one of which will contain the latency and false alarm rates
for each of the events and normalization windows, and the other
of which will contain the bins and counts for the background
distributions used to calculate each of these false alarm rates.
Args:
        data_dir: Path to the directory containing timeslides
write_dir: Path to directory to which to write matched filter outputs
results_dir:
Path to directory to which to write analysis logs and
summary csvs for analyzed events and their corresponding
background distributions.
window_length:
Length of time, in seconds, over which to average
neural network outputs for matched filter analysis
norm_seconds:
Length of time, in seconds, over which to compute a moving
"background" used to normalize the averaged neural network
outputs. More specifically, the matched filter output at each
point in time will be the average over the last `window_length`
seconds, normalized by the mean and standard deviation of the
previous `norm_seconds` seconds. If left as `None`, no
normalization will be performed. Otherwise, should be specified
as an iterable to compute multiple different normalization values
for each event.
max_tb:
            The maximum amount of time-shifted background data to analyze
per event, in seconds
num_bins:
The number of bins to use in building up the discrete background
distribution
force:
Flag indicating whether to force an event analysis to re-run
if its data already exists in the summary files written to
`results_dir`.
log_file:
A filename to write logs to. If left as `None`, logs will only
be printed to stdout
verbose:
Flag indicating whether to log at level `INFO` (if set)
or `DEBUG` (if not set)
"""
results_dir.mkdir(parents=True, exist_ok=True)
configure_logging(results_dir / log_file, verbose)
# organize timeslides into segments
timeslide = TimeSlide(data_dir / "dt-0.0")
# if we're not going to force ourselves to re-analyze
# events we've already analyzed, try and load existing
# results so we know what we can skip.
if not force:
try:
characterizations = pd.read_csv(
results_dir / "characterizations.csv"
).set_index(["event_name", "norm_seconds"])
except FileNotFoundError:
characterizations = pd.DataFrame()
try:
timeseries = | pd.read_csv(results_dir / "timeseries.csv") | pandas.read_csv |
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
import math
# Training data
re = RandomOverSampler()
df = | pd.read_csv("data/raw/train.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2020 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from skimpy.utils.general import sanitize_cobra_vars
from skimpy.utils.conversions import deltag0_to_keq
from skimpy.core.parameters import ParameterValues
import pandas as pd
import numpy as np
# Load and convert pytfa solution for kinetic model
def load_fluxes(solution_raw,tmodel,kmodel,
density=None,
ratio_gdw_gww=None,
concentration_scaling=None,
time_scaling=None):
# TODO try to fetch from model
if density is None \
or ratio_gdw_gww is None \
or concentration_scaling is None \
or time_scaling is None:
raise ValueError("density, ratio_gdw_gww, concentration_scaling, or time_scaling "
"is required as input or field of kmodel")
# Flux solution input assumed to be mmol/gDW/hr
flux_scaling_factor = 1e-3 * (ratio_gdw_gww * density) \
* concentration_scaling \
/ time_scaling
fluxes_in_kmodel = list(kmodel.reactions.keys())
# Convert to net-fluxes
solution_nf = { this_rxn.id: (solution_raw[this_rxn.forward_variable.name] \
- solution_raw[this_rxn.reverse_variable.name]) \
for this_rxn in tmodel.reactions}
# Convert tmodel net fluxes to kmodel fluxes
flux_dict = {rxn: solution_nf[rxn]*flux_scaling_factor for rxn in fluxes_in_kmodel}
fluxes = pd.Series(flux_dict)
# Sort according to the k-model
return fluxes[fluxes_in_kmodel]
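# Hedged worked example of the scaling above, with assumed values: a flux of
# 1 mmol/gDW/hr with density=1200 g/L, ratio_gdw_gww=0.3,
# concentration_scaling=1e6 and time_scaling=1 becomes
# 1e-3 * (0.3 * 1200) * 1e6 / 1 = 3.6e5 in scaled model units.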
def load_concentrations(solution_raw, tmodel, kmodel, concentration_scaling=None):
# TODO try to fetch from model
if concentration_scaling is None:
raise ValueError("concentration_scaling is required as input or field of kmodel")
concentration_dict = {sanitize_cobra_vars(lc.id): np.exp(solution_raw[lc.variable.name])
*concentration_scaling
for lc in tmodel.log_concentration}
concentrations = | pd.Series(concentration_dict) | pandas.Series |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        # assert (rather than just compute) that the round-tripped index stays datetime64[ns]
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assertTrue(result1.equals(result4))
self.assertTrue(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assertTrue(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assertTrue(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[30:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[60:100])
self.assertIsNone(result.freq)
result = rng[:50].intersection(rng[25:75])
self.assertEqual(result.freqstr, 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assertEqual(result.freq, rng.freq)
result = rng[:50].intersection(nofreq)
self.assertEqual(result.freq, rng.freq)
def test_min_max(self):
        rng = date_range('1/1/2000', '12/31/2000')
        # minimal completion of the truncated test: min/max are the range endpoints
        self.assertEqual(rng.min(), Timestamp('2000-01-01'))
        self.assertEqual(rng.max(), Timestamp('2000-12-31'))
from flask import Flask
from pandas.io import parsers
from flask_restful import Resource, Api, reqparse
import pandas as pd
import ast
app = Flask(__name__)
api = Api(app)
class Users(Resource):
# methods go here
def get(self):
data = pd.read_csv('users.csv') # read CSV
data = data.to_dict() # convert dataframe to dictionary
return {'data': data}, 200 # return data and 200 OK code
def post(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add args
parser.add_argument('name', required=True)
parser.add_argument('city', required=True)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
        if args['userId'] in list(data['userId']):
            # userId already exists; 409 Conflict matches the Locations resource below
            return {
                'message': f" '{args['userId']}' already exists. "
            }, 409
else:
# create new dataframe containing new values
new_data = pd.DataFrame({
'userId': args['userId'],
'name': args['name'],
'city': args['city'],
'locations': [[]]
})
# add the newly provided values
data = data.append(new_data, ignore_index=True)
# save back to CSV
data.to_csv('users.csv', index=False)
            return {'data': data.to_dict()}, 200  # return data and 200 OK code
def put(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add args
parser.add_argument('location', required=True)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
if args['userId'] in list(data['userId']):
# evaluate strings of lists to lists
data['locations'] = data['locations'].apply(
lambda x: ast.literal_eval(x)
)
# select our user
user_data = data[data['userId'] == args['userId']]
# update users location
            # append the new location; list.append mutates the shared list object
            # inside `data` in place (and returns None), so no reassignment is needed
            user_data['locations'].values[0].append(args['location'])
# save back to CSV
data.to_csv('users.csv', index=False)
# return data and 200 OK
return {'data': data.to_dict()}, 200
else:
# userId doesn't exists
return {
'message': f" '{args['userId']}' user not found."
}, 404
def delete(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add userId arg
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
if args['userId'] in list(data['userId']):
# remove data entry matching userId
data = data[data['userId'] != args['userId']]
# save back to CSV
data.to_csv('users.csv', index=False)
# return data and 200 OK
return {'data': data.to_dict()}, 200
else:
# otherwise we return 404 because userid doesn't exists
return {
'message': f" '{args['userId']}' user not found. "
}, 404
class Locations(Resource):
# methods go here
def get(self):
data = pd.read_csv('locations.csv') # read local csv
return {'data': data.to_dict()}, 200 # return data dict and 200 OK
def post(self):
parser = reqparse.RequestParser() # initialize parser
parser.add_argument('locationId', required=True, type=int) # add args
parser.add_argument('name', required=True)
parser.add_argument('rating', required=True)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('locations.csv')
# check if location already exists
if args['locationId'] in list(data['locationId']):
            # if locationId already exists, return 409 conflict
return {
'message': f" '{args['locationId']}' already exists."
}, 409
else:
# otherwise, we can add the new location record
# create new dataframe containing new values
new_data = pd.DataFrame({
'locationId': [args['locationId']],
'name': [args['name']],
'rating': [args['rating']]
})
# add the newly provided values
data = data.append(new_data, ignore_index=True)
data.to_csv('locations.csv', index=False) # save back to csv
return {'data': data.to_dict()}, 200 # return data with 200 OK
def patch(self):
parser = reqparse.RequestParser() # initialize parser
parser.add_argument('locationId', required=True, type=int) # add args
parser.add_argument('name', store_missing=False)
parser.add_argument('rating', store_missing=False)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
        data = pd.read_csv('locations.csv')
# Standard library imports
import math
import pprint
import re
from collections import Counter
# Third party imports
import pandas as pd
import numpy as np
# Local app imports
from data_module.corpus import operations as op
# -------------------------- EXTERNAL MODULES ABOVE -------------------------- #
pp = pprint.PrettyPrinter(indent=4)
raw_questions = pd.read_csv('questions_test.csv')[["Id", "Title", "Body"]]
questions = raw_questions.replace(np.nan, '', regex=True)
raw_answers = pd.read_csv('answers_test.csv')  # assumed: mirrors the questions load above
import pandas as pd
import requests
import urllib.request
import json
import numpy as np
from ast import literal_eval
from config import *
class MergeFiles:
def __init__(self):
pass
def merge_files(self):
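        # Combine the three sources: customers come in as JSON lines from an
        # HTTP endpoint, visits and loans are read as CSV from the configured URLs.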
# Mergining files
# Fetching the customer data from URL
with urllib.request.urlopen(customers_url) as url:
            customers_data = url.read().decode('utf-8')
        # Converting JSON lines to a DataFrame
        customers = pd.DataFrame.from_records(json.loads(
            '[' + customers_data.replace('\n', ',') + ']'))
visits = pd.read_csv(visits_url)
visits.rename(columns={'Unnamed: 0': 'visit_column1'}, inplace=True)
# Loading loans data into Dataframe.
loans = pd.DataFrame()
for loan_url in loans_urls:
            data = pd.read_csv(base_url + loan_url)  # assumed: loan CSVs live under base_url from config
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
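    # points/dropped/late must share the same shape and row/column labels,
    # and the point columns must line up with the maximums index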
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
    # if we are dropping 2 HWs, the right strategy is to keep only the
    # 100 point homework for A1 and only the 2 point homework for A2
# when
actual = gradebook.drop_lowest(2, within=homeworks)
# then
assert not actual.dropped.iloc[0, 2]
assert not actual.dropped.iloc[1, 0]
assert list(actual.dropped.sum(axis=1)) == [2, 2]
assert_gradebook_is_sound(actual)
def test_drop_lowest_counts_lates_as_zeros():
# given
columns = ["hw01", "hw02"]
p1 = pd.Series(data=[10, 5], index=columns, name="A1")
    p2 = pd.Series(data=[10, 10], index=columns, name="A2")  # assumed values
# -*- coding: utf-8 -*-
__title__ = 'histTags2mpt'
__description__ = 'to evaluate a HDSR FEWS-config with a csv with CAW histTags'
__version__ = '0.1'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__license__ = 'MIT License'
'''
ToDo:
- move settings to config.ini
- also save logging output to a file
'''
import configparser
from fews_utilities import Config, xml_to_dict
from pathlib import Path
import json
import numpy as np
import pandas as pd
import logging
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill
import os
import sys
import shutil
import re
from collections.abc import Iterable
from shapely.geometry import Point
pd.options.mode.chained_assignment = None
#%% settings
# layout of the excel spreadsheet
fixed_sheets = ['histTag_ignore',
'inhoudsopgave',
'exLoc_ignore',
'TS800_ignore',
'xy_ignore']
warning_sheets = ['histTags_noMatch',
'histTags_ignore_match',
'dubbele idmaps',
'idmap v sectie',
'exPar error',
'exPar missing',
'intLoc missing',
'exLoc error',
'timeSeries error',
'validation error',
'par mismatch',
'locSet error',
'hloc error']
idmap_files = ['IdOPVLWATER',
'IdOPVLWATER_HYMOS',
'IdHDSR_NSC',
'IdOPVLWATER_WQ',
'IdGrondwaterCAW']
# sections in the idmap files
idmap_sections = {'IdOPVLWATER':{'KUNSTWERKEN':[{'section_start': '<!--KUNSTWERK SUBLOCS (old CAW id)-->',
'section_end': '<!--WATERSTANDSLOCATIES (old CAW id)-->'},
{'section_start': '<!--KUNSTWERK SUBLOCS (new CAW id)-->',
'section_end':'<!--WATERSTANDSLOCATIES (new CAW id)-->'}],
'WATERSTANDLOCATIES':[{'section_start': '<!--WATERSTANDSLOCATIES (old CAW id)-->',
'section_end': '<!--MSW (old CAW id)-->'},
{'section_start': '<!--WATERSTANDSLOCATIES (new CAW id)-->',
'section_end': '<!--MSW (new CAW id)-->'}],
'MSWLOCATIES':[{'section_start': '<!--MSW (new CAW id)-->'}]},
'IdOPVLWATER_HYMOS':{'KUNSTWERKEN':[{'section_end':'<!--WATERSTANDSLOCATIES-->'}],
'WATERSTANDLOCATIES':[{'section_start': '<!--WATERSTANDSLOCATIES-->',
'section_end':'<!--OVERIG-->'}]}
}
# exParameters per sub-loc type
expars_allowed = {'pompvijzel': ['FQ.$', 'I.B$', 'IB.$', 'I.H$', 'IH.$', 'I.L$', 'IL.$', 'Q.$' , 'TT.$'],
'stuw': ['SW.$', 'Q.$', 'ES.$'],
'schuif': ['ES.$', 'SP.$', 'SS.$', 'Q.$', 'SM.$'],
'afsluiter': ['ES.$'],
'debietmeter': ['Q.$'],
'vispassage': ['ES.$', 'SP.$', 'SS.$', 'Q.$'],
'krooshek': ['HB.$', 'HO.$'],
'waterstand': ['HB.$', 'HO.$', 'H$']}
#%% functions
def idmap2tags(row,idmap):
    '''add FEWS location ids to the hist_tags data-frame (used via apply)'''
exloc, expar = row['serie'].split('_',1)
fews_locs = [col['internalLocation']
for col in idmap
if col['externalLocation'] == exloc
and col['externalParameter'] == expar]
if len(fews_locs) == 0:
fews_locs = np.NaN
return fews_locs
def update_hlocs(row):
    '''add start and end date to the main-location (hoofdloc) data-frame (used via apply)'''
loc_id = row.name
start_date = row['STARTDATE']
end_date = row['ENDDATE']
if loc_id in h_locs:
start_date = mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]['STARTDATE'].dropna().min()
end_date = mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]['ENDDATE'].dropna().max()
return start_date, end_date
def flatten(l):
    '''flatten an irregular iterable of lists'''
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
def get_attribs(validation_rules,int_pars=None,loc_type=None):
    '''collect the attributes referenced by the validation rules'''
if int_pars is None:
int_pars = [rule['parameter'] for rule in validation_rules]
result = []
for rule in validation_rules:
if 'type' in rule.keys():
if rule['type'] == loc_type:
if any(re.match(rule['parameter'],int_par) for int_par in int_pars):
for key,attribute in rule['extreme_values'].items():
if isinstance(attribute,list):
result += [value['attribute'] for value in attribute]
else:
result += [attribute]
elif any(re.match(rule['parameter'],int_par) for int_par in int_pars):
for key,attribute in rule['extreme_values'].items():
if isinstance(attribute,list):
result += [value['attribute'] for value in attribute]
else:
result += [attribute]
return result
#%% initialization
workdir = Path(__file__).parent
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
# read paths from the config file
config_json = Path(r'..\config\config.json')
if config_json.exists():
with open(config_json) as src:
config = json.load(src)
else:
logging.error(f'{config_json} does not exist')
sys.exit()
# check whether the paths exist
for key, path in config['paden'].items():
path = Path(path)
if not path.is_absolute():
path = workdir.joinpath(path).resolve()
if path.exists():
config['paden'][key] = path
else:
if path.suffix == '':
logging.warning(f'{path} bestaat niet, map wordt aangemaakt')
path.mkdir()
else:
logging.error(f'{path} bestaat niet. Specificeer het juiste path in config.ini')
sys.exit()
locals().update(config['paden'])
consistency_out_xlsx = consistency_xlsx.parent.joinpath(f'{consistency_xlsx.stem}_uit.xlsx')
#%% read the config excel
# copy the consistency workbook to the output file
try:
shutil.copyfile(consistency_xlsx, consistency_out_xlsx)
except Exception as e:
logging.error(e)
sys.exit()
consistency_df = pd.read_excel(consistency_xlsx,sheet_name=None,engine='openpyxl')
if not (('histTag_ignore' in consistency_df.keys()) | (mpt_ignore_csv != None)):
logging.error(f'specificeer een histTag_ignore werkblad in {consistency_xlsx} of een csv-file in {config_json}')
sys.exit()
# discard all output sheets except the fixed ones
consistency_df = {key:value for key,value in consistency_df.items() if key in fixed_sheets}
#%% read idmap files
fews_config = Config(fews_config)
idmap_dict = {idmap:xml_to_dict(fews_config.IdMapFiles[idmap])['idMap']['map']
for idmap in idmap_files}
idmap_total = [j for i in idmap_dict.values() for j in i]
#%% read locationSets
location_sets = {location_set:{'id':config['location_sets'][location_set],
'gdf':fews_config.get_locations(config['location_sets'][location_set])}
for location_set in config['location_sets']}
#%% check KW/OW
logging.info('controle op KW/OW locaties in juiste sectie')
consistency_df['idmap v sectie'] = pd.DataFrame(columns=['bestand',
'externalLocation',
'externalParameter',
'internalLocation',
'internalParameter',
])
idmap = 'IdOPVLWATER'
idmap_subsecs = idmap_sections[idmap]
for section_type, sections in idmap_subsecs.items():
for section in sections:
if section_type == 'KUNSTWERKEN':
prefix = 'KW'
if section_type == 'WATERSTANDLOCATIES':
prefix = 'OW'
if section_type == 'MSWLOCATIES':
prefix = '(OW|KW)'
pattern = f'{prefix}\d{{6}}$'
idmap_wrong_section = [idmap for idmap in xml_to_dict(fews_config.IdMapFiles[idmap],**section)['idMap']['map']
if not bool(re.match(pattern,idmap['internalLocation']))]
if len(idmap_wrong_section):
section_start = section['section_start'] if 'section_start' in section.keys() else ''
section_end = section['section_end'] if 'section_end' in section.keys() else ''
logging.warning('{} internalLocations anders dan {}XXXXXX tussen {} en {} in {}'.format(len(idmap_wrong_section),
prefix,
section_start,
section_end,
idmap))
df = pd.DataFrame(idmap_wrong_section)
df['sectie'] = section_start
df['bestand'] = idmap
consistency_df['idmap v sectie'] = pd.concat([consistency_df['idmap v sectie'], df], axis=0)
#%% read hist tags & ignore list
logging.info('zoeken naar missende histTags in idmaps')
dtype_cols = ['total_min_start_dt', 'total_max_end_dt']
hist_tags_org_df = pd.read_csv(hist_tags_csv,
parse_dates = dtype_cols,
sep = ';')
for col in dtype_cols:
if not pd.api.types.is_datetime64_dtype(hist_tags_org_df[col]):
logging.error(f"kolom '{col}' in '{hist_tags_csv}' kan niet worden geconverteerd"
" naar np.datetime64 formaat. Controleer of deze datums realistisch zijn.")
sys.exit()
#%% filter hist_tags on everything that is not in the ignore list
if mpt_ignore_csv:
logging.info(f'histag_ignore wordt gelezen uit {mpt_ignore_csv.absolute().resolve()}')
consistency_df['histTag_ignore'] = pd.read_csv(mpt_ignore_csv,sep=None,header=0,engine='python')
else:
logging.info(f'histag_ignore wordt gelezen uit werkblad "histTag_ignore" in {consistency_in.absolute().resolve()}')
consistency_df['histTag_ignore']['UNKNOWN_SERIE'] = consistency_df['histTag_ignore']['UNKNOWN_SERIE'].str.replace('#','')
hist_tags_df = hist_tags_org_df.copy()
hist_tags_df['fews_locid'] = hist_tags_org_df.apply(idmap2tags, args=[idmap_total], axis=1)
hist_tags_no_match_df = hist_tags_df[hist_tags_df['fews_locid'].isna()]
hist_tags_no_match_df = hist_tags_no_match_df[~hist_tags_no_match_df['serie'].isin(consistency_df['histTag_ignore']['UNKNOWN_SERIE'])]
hist_tags_no_match_df = hist_tags_no_match_df.drop('fews_locid',axis=1)
hist_tags_no_match_df.columns = ['UNKNOWN_SERIE','STARTDATE','ENDDATE']
hist_tags_no_match_df = hist_tags_no_match_df.set_index('UNKNOWN_SERIE')
consistency_df['histTags_noMatch'] = hist_tags_no_match_df
if not consistency_df['histTags_noMatch'].empty:
logging.warning('{} histTags zijn niet opgenomen in idmap'.format(len(consistency_df['histTags_noMatch'])))
else:
logging.info('alle histTags zijn opgenomen in idmap')
#%% write ids that are wrongly included in the ignore list
hist_tags_opvlwater_df = hist_tags_org_df.copy()
hist_tags_opvlwater_df['fews_locid'] = hist_tags_org_df.apply(idmap2tags, args=[idmap_dict['IdOPVLWATER']], axis=1)
hist_tags_opvlwater_df = hist_tags_opvlwater_df[hist_tags_opvlwater_df['fews_locid'].notna()]
hist_tag_ignore_match_df = consistency_df['histTag_ignore'][consistency_df['histTag_ignore']['UNKNOWN_SERIE'].isin(hist_tags_opvlwater_df['serie'])]
hist_tag_ignore_match_df = hist_tag_ignore_match_df.set_index('UNKNOWN_SERIE')
consistency_df['histTags_ignore_match'] = hist_tag_ignore_match_df
if not consistency_df['histTags_ignore_match'].empty:
logging.warning('{} histTags zijn ten onrechte opgenomen in histTag ignore'.format(len(consistency_df['histTags_ignore_match'])))
else:
logging.info('geen histTags ten onrechte in ignore')
#%% build mpt_df from the fews_locid lists in hist_tags_df
logging.info('omzetten van histTags naar meetpunten')
hist_tags_df = hist_tags_df[hist_tags_df['fews_locid'].notna()]
mpt_hist_tags_df = hist_tags_df.explode('fews_locid').reset_index(drop=True)
# determine the minimum start and maximum end time per fews_locid
mpt_df = pd.concat([mpt_hist_tags_df.groupby(['fews_locid'], sort=False)['total_min_start_dt'].min(),
mpt_hist_tags_df.groupby(['fews_locid'], sort=False)['total_max_end_dt'].max()],
axis=1)
mpt_df = mpt_df.sort_index(axis=0)
mpt_df.columns = ['STARTDATE','ENDDATE']
mpt_df.index.name = 'LOC_ID'
# add every main location (hoofdloc) that does not receive a histTag itself
kw_locs = list(mpt_df[mpt_df.index.str.contains('KW', regex=False)].index)
h_locs = np.unique(['{}0'.format(loc[0:-1]) for loc in kw_locs])
h_locs_missing = [loc for loc in h_locs if not loc in list(mpt_df.index)]
h_locs_df = pd.DataFrame(data={'LOC_ID' : h_locs_missing,
'STARTDATE' : [pd.NaT]*len(h_locs_missing),
'ENDDATE' : [pd.NaT]*len(h_locs_missing)})
h_locs_df = h_locs_df.set_index('LOC_ID')
mpt_df = pd.concat([mpt_df,h_locs_df],axis=0)
# update start and end time of the main location with the min/max of its sublocations
mpt_df[['STARTDATE','ENDDATE']] = mpt_df.apply(update_hlocs,axis=1,result_type="expand")
mpt_df = mpt_df.sort_index()
consistency_df['mpt'] = mpt_df
#%% check for duplicate idmaps
logging.info('controle dubbele idmaps')
consistency_df['dubbele idmaps'] = pd.DataFrame(columns=['bestand',
'externalLocation',
'externalParameter',
'internalLocation',
'internalParameter'])
for idmap_file in idmap_files:
idmap_doubles = [id_map for id_map in idmap_dict[idmap_file] if idmap_dict[idmap_file].count(id_map) > 1]
if len(idmap_doubles) > 0:
idmap_doubles = list({idmap['externalLocation']:idmap for idmap in idmap_doubles}.values())
df = pd.DataFrame(idmap_doubles,columns=['internalLocation','externalLocation','internalParameter','externalParameter'])
df['bestand'] = idmap_file
consistency_df['dubbele idmaps'] = pd.concat([consistency_df['dubbele idmaps'], df], axis=0)
logging.warning('{} dubbele idmap(s) in {}'.format(len(idmap_doubles),idmap_file))
else:
logging.info('geen dubbele idmaps in {}'.format(idmap_file))
#%% parameter consistency: are all internal parameters included in parameters.xml
logging.info('zoeken op missende interne parameters')
config_parameters = list(fews_config.get_parameters(dict_keys='parameters').keys())
id_map_parameters = [id_map['internalParameter'] for id_map in idmap_total]
params_missing = [parameter for parameter in id_map_parameters
if not parameter in config_parameters]
if len(params_missing) == 0:
logging.info('alle parameters in idMaps zijn opgenomen in config')
else:
logging.warning('{} parameter(s) in idMaps missen in config'.format(len(params_missing)))
consistency_df['params_missing'] = pd.DataFrame({'parameters': params_missing})
consistency_df['params_missing'] = consistency_df['params_missing'].set_index('parameters')
#%% check sublocation consistency for writing hoofdloc_gdf
logging.info('controle consistentie sublocs op per hoofdlocatie')
if 'xy_ignore' in consistency_df.keys():
xy_ignore_df = consistency_df['xy_ignore']
else:
xy_ignore_df = pd.DataFrame({'internalLocation':[],'x':[],'y':[]})
hoofdloc_gdf = fews_config.get_locations('OPVLWATER_HOOFDLOC')
subloc_gdf = fews_config.get_locations('OPVLWATER_SUBLOC')
hloc_errors = {'LOC_ID':[],
'SUB_LOCS':[],
'LOC_NAME':[],
'GEOMETRY':[],
'SYSTEEM':[],
'RAYON':[],
'KOMPAS':[]}
grouper = subloc_gdf.groupby('PAR_ID')
par_dict = {'LOC_ID':[],
'LOC_NAME':[],
'X':[],
'Y':[],
'ALLE_TYPES':[],
'START':[],
'EIND':[],
'SYSTEEM':[],
'RAYON':[],
'KOMPAS':[]}
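# derive one parent (hoofd) location per CAW code from its sublocations and
# record an error whenever the sublocation attributes are inconsistent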
for loc_id, gdf in grouper:
caw_code = loc_id[2:-2]
errors = dict.fromkeys(['LOC_NAME','GEOMETRY','SYSTEEM','RAYON','KOMPAS'],False)
fields = dict.fromkeys(par_dict.keys(),None)
fields['LOC_ID'] = loc_id
    # check that the sublocations share one consistent parent-name sub-string
loc_names = np.unique(gdf['LOC_NAME'].str.extract(pat = f'([A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*)').values)
if not len(loc_names) == 1:
errors['LOC_NAME'] = ",".join(loc_names)
else:
fields['LOC_NAME'] = loc_names[0]
    # check that the sublocations share one consistent geometry
if any([re.match(loc,loc_id) for loc in xy_ignore_df['internalLocation']]):
fields['X'], fields['Y'] = next([row['x'],row['y']]
for index, row in xy_ignore_df.iterrows()
if re.match(row['internalLocation'], loc_id))
else:
geoms = gdf['geometry'].unique()
if not len(geoms) == 1:
errors['GEOMETRY'] = ",".join([f'({geom.x} {geom.y})' for geom in geoms])
else:
fields['X'] = geoms[0].x
fields['Y'] = geoms[0].y
    # write all types present on the sublocations
all_types = list(gdf['TYPE'].unique())
all_types.sort()
fields['ALLE_TYPES'] = '/'.join(all_types)
    # write start/end from the min/max over the sublocations
fields['START'] = gdf['START'].min()
fields['EIND'] = gdf['EIND'].max()
    # check that the attributes are unique
for attribuut in ['SYSTEEM','RAYON','KOMPAS']:
vals = gdf[attribuut].unique()
if not len(vals) == 1:
            errors[attribuut] = ",".join(vals)
else:
fields[attribuut] = vals[0]
    # the parent can be written when all sublocation values are consistent
if not None in fields.values():
for key,value in fields.items():
par_dict[key].append(value)
    # on any error, record the location in the error dict
if any(errors.values()):
hloc_errors['LOC_ID'].append(loc_id)
hloc_errors['SUB_LOCS'].append(','.join(gdf['LOC_ID'].values))
for key,value in errors.items():
if value == False:
value = ''
hloc_errors[key].append(value)
consistency_df['hloc error'] = pd.DataFrame(hloc_errors)
# include in the summary
if consistency_df['hloc error'].empty:
logging.info('geen fouten in aanmaken hoofdlocaties')
par_gdf = pd.DataFrame(par_dict)
columns = list(hoofdloc_gdf.columns)
drop_cols = [col for col in hoofdloc_gdf.columns if (col in par_gdf.columns) & (not col =='LOC_ID')]
drop_cols = drop_cols + ['geometry']
hoofdloc_gdf = hoofdloc_gdf.drop(drop_cols, axis=1)
hoofdloc_gdf = par_gdf.merge(hoofdloc_gdf,on='LOC_ID')
hoofdloc_gdf['geometry'] = hoofdloc_gdf.apply((lambda x: Point(float(x['X']),
float(x['Y']))),
axis=1)
hoofdloc_gdf = hoofdloc_gdf[columns]
else:
logging.warning('{} fouten bij aanmaken hoofdlocaties'.format(len(consistency_df['hloc error'])))
logging.warning('hoofdlocaties worden alleen opnieuw geschreven vanuit sublocaties wanneer de fouten zijn opgelost')
#%% consistency of external parameters with internal parameters/locations
logging.info('controle foutieve ex-parameters & niet opgenomen inlocs')
if 'externalParametersAllowed' in config.keys():
expars_allowed = {key: value.replace(" ","").split(',')
for key, value in config['externalParametersAllowed'].items()}
waterstandloc_gdf = fews_config.get_locations('OPVLWATER_WATERSTANDEN_AUTO')
mswloc_gdf = fews_config.get_locations('MSW_STATIONS')
ex_par_errors = {'internalLocation':[],
'locationType':[],
'exParError':[],
'types':[],
'FQ':[],
'I.X':[],
'IX.':[],
'SS./SM.':[]}
int_loc_missing = []
# build a data-frame so we can group by internalLocation
idmap_df = pd.DataFrame.from_dict(idmap_dict['IdOPVLWATER'])
for int_loc, loc_group in idmap_df.groupby('internalLocation'):
    # initialize a few variables
errors = dict.fromkeys(['I.X','IX.','FQ', 'SS./SM.'],False)
    # internal location and external parameters
ex_pars = np.unique(loc_group['externalParameter'].values)
ex_pars_gen = [re.sub("\d", ".", ex_par) for ex_par in ex_pars]
    # determine the location type
if int_loc in hoofdloc_gdf['LOC_ID'].values:
loc_properties = hoofdloc_gdf[hoofdloc_gdf['LOC_ID'] == int_loc]
loc_type = 'hoofdloc'
elif int_loc in subloc_gdf['LOC_ID'].values:
loc_properties = subloc_gdf[subloc_gdf['LOC_ID'] == int_loc]
loc_type = 'subloc'
regexes = ['HR.$']
elif int_loc in waterstandloc_gdf['LOC_ID'].values:
loc_type = 'waterstandloc'
elif int_loc in mswloc_gdf['LOC_ID'].values:
loc_type = 'mswloc'
else:
loc_type = None
int_loc_missing += [int_loc]
    # determine the object types
if loc_type in ['hoofdloc', 'subloc']:
all_types = loc_properties['ALLE_TYPES'].values[0].split("/")
all_types = [item.lower() for item in all_types]
elif loc_type == 'waterstandloc':
all_types = ['waterstandloc']
if loc_type == 'subloc':
sub_type = subloc_gdf[subloc_gdf['LOC_ID'] == int_loc]['TYPE'].values[0]
        # look for wrongly assigned ex_pars
regexes += [j for i in
[values for keys, values in expars_allowed.items() if keys in all_types]
for j in i]
        regexes = list(dict.fromkeys(regexes))  # de-duplicate while keeping order
ex_par_error = [ex_par for ex_par in ex_pars if not any([regex.match(ex_par) for regex in [re.compile(rex) for rex in regexes]])]
        # a schuif (gate) requires an SM. or SS. parameter
if sub_type == 'schuif':
if not any([ex_par for ex_par in ex_pars_gen if ex_par in ['SS.', 'SM.']]):
errors['SS./SM.'] = True
        # I.X and IX. style parameters must either both be present or both be absent
if any([ex_par for ex_par in ex_pars_gen if ex_par in ['I.B', 'I.H', 'I.L']]):
if not any([ex_par for ex_par in ex_pars_gen if ex_par in ['IB.', 'IH.', 'IL.']]):
errors['IX.'] = True
elif any([ex_par for ex_par in ex_pars_gen if ex_par in ['IB.', 'IH.', 'IL.']]):
errors['I.X'] = True
        # if FQ is present, an I.B-style parameter must be present as well
if 'FQ.' in ex_pars_gen:
if not any([ex_par for ex_par in ex_pars_gen if ex_par in ['IB.', 'IH.', 'IL.', 'I.B', 'I.H', 'I.L']]):
errors['FQ'] = True
elif loc_type == 'hoofdloc':
        # look for wrongly assigned ex_pars
regexes = ['HS.$', 'QR.$', 'QS.$', 'WR', 'WS']
ex_par_error = [ex_par for ex_par in ex_pars if not any([regex.match(ex_par) for regex in [re.compile(rex) for rex in regexes]])]
else:
ex_par_error = []
    # report exPar errors
    if len(ex_par_error) > 0 or any(errors.values()):
ex_par_errors['internalLocation'].append(int_loc)
ex_par_errors['locationType'].append(loc_type)
ex_par_errors['exParError'].append(','.join(ex_par_error))
ex_par_errors['types'].append(','.join(all_types))
for key, value in errors.items():
ex_par_errors[key].append(value)
# store the results in data-frames
consistency_df['exPar error'] = pd.DataFrame(ex_par_errors)
consistency_df['intLoc missing'] = pd.DataFrame({'internalLocation':int_loc_missing})
# include in the summary
# log the result
if len(consistency_df['exPar error']) == 0:
logging.info('geen ExPar errors')
else:
logging.warning('{} locaties met ExPar errors'.format(len(consistency_df['exPar error'])))
if len(consistency_df['intLoc missing']) == 0:
logging.info('alle interne locaties uit idmap opgenomen in locationSets')
else:
logging.warning('{} interne locaties niet opgenomen in locationSets'.format(len(consistency_df['intLoc missing'])))
#%% missing exPars
logging.info('controle missende ex-parameters')
ex_par_missing = {'internalLocation':[],
'exPars':[],
'QR':[],
'QS':[],
'HS':[]}
grouper = idmap_df.groupby('internalLocation')
for index, row in hoofdloc_gdf.iterrows():
missings = dict.fromkeys(['QR','QS','HS'],False)
int_loc = row['LOC_ID']
loc_group = next((df for loc,df in idmap_df.groupby('internalLocation') if loc == int_loc), pd.DataFrame())
if not loc_group.empty:
ex_pars = np.unique(loc_group['externalParameter'].values)
ex_pars_gen = [re.sub("\d", ".", ex_par) for ex_par in ex_pars]
else:
ex_pars = []
ex_pars_gen = []
    # is there an HS?
if not ('HS.' in ex_pars_gen):
missings['HS'] = True
if not ('QR.' in ex_pars_gen):
missings['QR'] = True
if not ('QS.' in ex_pars_gen):
missings['QS'] = True
    # report the missing parameters
if any(missings.values()):
ex_par_missing['internalLocation'].append(int_loc)
ex_par_missing['exPars'].append(','.join(ex_pars))
for key, value in missings.items():
ex_par_missing[key].append(value)
consistency_df['exPar missing'] = pd.DataFrame(ex_par_missing)
# log the result
if len(consistency_df['exPar missing']) == 0:
logging.info('geen ExPar missing')
else:
logging.warning('{} locaties met ExPar missing'.format(len(consistency_df['exPar missing'])))
#%% look for ex-loc errors
logging.info('controle externe locaties')
ex_loc_errors = {'internalLocation':[],
'externalLocation':[]}
for loc_group in idmap_df.groupby('externalLocation'):
    # initialize int_loc_error
int_loc_error = []
    # look for ex-loc errors
ex_loc = loc_group[0]
int_locs = np.unique(loc_group[1]['internalLocation'].values)
    # if the ex-loc has length 3
if len(ex_loc) == 3:
        # the default case
if not bool(re.match('8..$',ex_loc)):
int_loc_error = [int_loc for int_loc in int_locs if
not bool(re.match(f'...{ex_loc}..$',int_loc))]
        # split locations; an ex-loc always maps to one unique main location + subloc
else:
for loc_type in ['KW','OW']:
int_locs_select = [int_loc for int_loc in int_locs
if bool(re.match(f'{loc_type}.',int_loc))]
if len(np.unique([int_loc[:-1] for int_loc in int_locs_select])) > 1:
int_loc_error += list(int_locs_select)
    # if the ex-loc has length 4
if len(ex_loc) == 4:
        # the default case
if not bool(re.match('.8..$',ex_loc)):
int_loc_error += [int_loc for int_loc in int_locs if
not bool(re.match(f'..{ex_loc}..$',int_loc))]
        # split locations; an ex-loc always maps to one unique main location + subloc
else:
for loc_type in ['KW','OW']:
int_locs_select = [int_loc for int_loc in int_locs
if bool(re.match(f'{loc_type}.',int_loc))]
if len(np.unique([int_loc[:-1] for int_loc in int_locs_select])) > 1:
int_loc_error += list(int_locs_select)
    # if the ex-loc is in the ignore list, clear int_loc_error
if 'exLoc_ignore' in consistency_df.keys():
if int(ex_loc) in consistency_df['exLoc_ignore']['externalLocation'].values:
int_loc_error = [int_loc for int_loc in int_loc_error
if not int_loc in
consistency_df['exLoc_ignore'][consistency_df['exLoc_ignore']['externalLocation']
== int(ex_loc)]['internalLocation'].values]
for int_loc in int_loc_error:
ex_loc_errors['internalLocation'].append(int_loc)
ex_loc_errors['externalLocation'].append(ex_loc)
consistency_df['exLoc error'] = pd.DataFrame(ex_loc_errors)
if len(consistency_df['exLoc error']) == 0:
logging.info('alle externe locaties consistent met interne locaties')
else:
logging.warning('{} externe locaties onlogisch bij interne locaties'.format(len(consistency_df['exLoc error'])))
#%% look for sublocations other than krooshek (trash rack) and debietmeter (flow meter):
# - without a control-level (stuurpeil) time series
# - where multiple control-level time series map to the same internal parameter
logging.info('controle koppeling tijdseries')
if 'TS800_ignore' in consistency_df.keys():
ts_ignore_df = consistency_df['TS800_ignore']
else:
ts_ignore_df = pd.DataFrame({'internalLocation':[],'externalLocation':[]})
idmap_subloc_df = idmap_df[idmap_df['internalLocation'].isin(subloc_gdf['LOC_ID'].values)] # only locations that are in the sub-locs locationSet
idmap_subloc_df['type'] = idmap_subloc_df['internalLocation'].apply((lambda x: subloc_gdf[subloc_gdf['LOC_ID'] == x]['TYPE'].values[0])) # add the type
idmap_subloc_df['loc_groep'] = idmap_subloc_df['internalLocation'].apply((lambda x: x[0:-1]))
ts_errors = {'internalLocation':[],
'internalParameters':[],
'externalParameters':[],
'externalLocations':[],
'type':[],
'fout':[]
}
for loc_group, group_df in idmap_subloc_df.groupby('loc_groep'):
    # unique number per ex-loc
ex_locs = np.unique(group_df['externalLocation'].values)
ex_locs_dict = {ex_loc:idx for idx, ex_loc in enumerate(ex_locs)}
    # find the 800-range numbers
split_ts = [key for key in ex_locs_dict.keys() if
any([regex.match(key)
for regex in [re.compile(rex)
for rex in ['8..','.8..']]])]
ex_locs_skip = ts_ignore_df[ts_ignore_df['internalLocation'].isin(group_df['internalLocation'])]['externalLocation']
split_ts = [key for key in split_ts if not str(key) in ex_locs_skip.values.astype(np.str)]
ex_locs_dict = {k:(ex_locs_dict[k[1:]]
if (k[1:] in ex_locs_dict.keys()) and (not k in split_ts)
else v) for (k,v) in ex_locs_dict.items()}
org_uniques = np.unique([val for key,val in ex_locs_dict.items() if not key in split_ts])
    # if there is exactly one group in split_ts and one group in the original time-series groups, merge them
if (len(org_uniques) == 1) & (len(split_ts) == 1):
ex_locs_dict = {k:(org_uniques[0] if k in split_ts else v) for (k,v) in ex_locs_dict.items()}
group_df['ex_loc_group'] = group_df['externalLocation'].apply((lambda x: ex_locs_dict[x]))
for int_loc, loc_df in group_df.groupby('internalLocation'):
sub_type = subloc_gdf[subloc_gdf['LOC_ID'] == int_loc]['TYPE'].values[0]
end_time = pd.to_datetime(subloc_gdf[subloc_gdf['LOC_ID'] == int_loc]['EIND'].values[0])
ex_pars = np.unique(loc_df['externalParameter'].values)
int_pars = np.unique(loc_df['internalParameter'].values)
ex_locs = np.unique(loc_df['externalLocation'].values)
if sub_type in ['krooshek','debietmeter']:
if any([re.match('HR.',ex_par) for ex_par in ex_pars]):
                # krooshek/debietmeter with a control level = error
ts_errors['internalLocation'].append(int_loc)
ts_errors['internalParameters'].append(",".join(int_pars))
ts_errors['externalParameters'].append(",".join(ex_pars))
ts_errors['externalLocations'].append(','.join(ex_locs))
ts_errors['type'].append(sub_type)
ts_errors['fout'].append(f'{sub_type} met stuurpeil')
        else:  # not a krooshek or debietmeter
            # no control level here, but a control level on another subloc = error
            if (not any([re.match('HR.',ex_par) for ex_par in ex_pars])): # no control level
if any([re.match('HR.',ex_par) for ex_par in np.unique(group_df['externalParameter'])]):
                    # non-krooshek/debietmeter without a control level = error
if not sub_type in ['totaal', 'vispassage']:
if pd.Timestamp.now() < end_time:
sp_locs = np.unique(group_df[group_df['externalParameter'].str.match('HR.')]['internalLocation'])
ts_errors['internalLocation'].append(int_loc)
ts_errors['internalParameters'].append(",".join(int_pars))
ts_errors['externalParameters'].append(",".join(ex_pars))
ts_errors['externalLocations'].append(','.join(ex_locs))
ts_errors['type'].append(sub_type)
ts_errors['fout'].append(f'{sub_type} zonder stuurpeil ({",".join(sp_locs)} wel)')
            else:  # this sublocation does have a control level (stuurpeil)
                # more than one control-level series without another internal parameter = error
time_series = loc_df.groupby(['ex_loc_group','externalParameter'])
sp_series = [series for series in time_series if bool(re.match('HR.',series[0][1]))]
for idx, series in enumerate(sp_series):
ex_par = series[0][1]
ex_locs = series[1]['externalLocation']
int_par = np.unique(series[1]['internalParameter'])
if len(int_par) > 1:
                        # one control-level series linked to 2 FEWS parameters
ts_errors['internalLocation'].append(int_loc)
ts_errors['internalParameters'].append(",".join(int_pars))
ts_errors['externalParameters'].append(",".join(ex_pars))
ts_errors['externalLocations'].append(','.join(ex_locs))
ts_errors['type'].append(sub_type)
ts_errors['fout'].append(f'{",".join(int_par)} gekoppeld aan 1 sp-serie (exPar: {ex_par}, exLoc(s)): {",".join(ex_locs)}')
other_series = [series for idy, series in enumerate(sp_series) if not idy == idx]
other_int_pars = [np.unique(series[1]['internalParameter']) for series in other_series]
if len(other_int_pars) > 0: other_int_pars = np.concatenate(other_int_pars)
conflicting_pars = [par for par in int_par if par in other_int_pars]
if len(conflicting_pars) > 0:
                        # two control-level series linked to the same FEWS parameter
ts_errors['internalLocation'].append(int_loc)
ts_errors['internalParameters'].append(",".join(int_pars))
ts_errors['externalParameters'].append(",".join(ex_pars))
ts_errors['externalLocations'].append(','.join(ex_locs))
ts_errors['type'].append(sub_type)
ts_errors['fout'].append(f'{",".join(conflicting_pars)} gekoppeld aan sp-serie (exPar: {ex_par}, exLoc(s)): {",".join(ex_locs)}')
consistency_df['timeSeries error'] = pd.DataFrame(ts_errors)
# include in the summary
if len(consistency_df['timeSeries error']) == 0:
logging.info('alle tijdseries zijn logisch gekoppeld aan interne locaties/parameters')
else:
logging.warning('{} tijdseries missend/onlogisch gekoppeld'.format(len(consistency_df['timeSeries error'])))
#%% check validation rule sets
'''ToDo:
- df_idmap should contain IDoppervlaktewater + Hymos
- add/split the columns fout_type and fout_opmerking
- make internal parameters unique
- report an error when smin/smax lists contain non-unique values
- add a comment line in the ini file for list comparisons. Restructure the json so this mistake cannot be made?
'''
logging.info('controle validationRules')
valid_errors = {'internalLocation':[],
'start':[],
'eind':[],
'internalParameters':[],
'fout_type':[],
'fout_beschrijving':[]
}
def sort_attribs(rule):
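    # normalise a rule's extreme_values: scalars become single-item lists and
    # lists of {period, attribute} entries are ordered by period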
result = {}
for key,value in rule.items():
if isinstance(value,str):
result[key] = [value]
elif isinstance(value,list):
periods = [val['period'] for val in value]
attribs = [val['attribute'] for val in value]
result[key] = [attrib for _,attrib in sorted(zip(periods,attribs))]
return result
location_sets_dict = xml_to_dict(fews_config.RegionConfigFiles['LocationSets'])['locationSets']['locationSet']
for set_name in config['validation_rules'].keys():
#set_name = 'subloc'
location_set = location_sets[set_name]
location_set_meta = next(loc_set for loc_set in location_sets_dict if loc_set['id'] == location_set['id'])['csvFile']
location_set_gdf = location_set['gdf']
attrib_files = location_set_meta['attributeFile']
if not isinstance(attrib_files,list):
attrib_files = [attrib_files]
attrib_files = [attrib_file for attrib_file in attrib_files if 'attribute' in attrib_file.keys()]
for attrib_file in attrib_files:
        # obtain a clean list of attributes
attribs = attrib_file['attribute']
join_id = attrib_file['id'].replace("%","")
if not isinstance(attrib_file['attribute'],list):
attribs = [attribs]
attribs = [attrib['number'].replace("%",'') for attrib in attribs if 'number' in attrib.keys()]
        # join the attribute file to the locationSet on location
attrib_df = pd.read_csv(fews_config.MapLayerFiles[attrib_file['csvFile'].replace('.csv','')],
sep=None,
engine='python')
attrib_df.rename(columns={join_id:'LOC_ID'},inplace=True)
drop_cols = [col for col in attrib_df if not col in attribs + ['LOC_ID']]
attrib_df = attrib_df.drop(columns=drop_cols, axis=1)
location_set_gdf = location_set_gdf.merge(attrib_df,
on='LOC_ID',
how='outer')
validation_rules = config['validation_rules'][set_name]
validaton_attributes = get_attribs(validation_rules)
#row = location_set_gdf.loc[0]
params_df = pd.DataFrame.from_dict({int_loc:[df['internalParameter'].values]
for int_loc, df
in idmap_df.groupby('internalLocation')},
orient='index',
columns=['internalParameters'])
for (idx, row) in location_set_gdf.iterrows():
int_loc = row['LOC_ID']
row = row.dropna()
if set_name == 'sublocaties':
loc_type = row['TYPE']
if int_loc in params_df.index:
int_pars = np.unique(params_df.loc[int_loc]['internalParameters'])
else:
int_pars = []
attribs_required = get_attribs(validation_rules,int_pars)
attribs_missing = [attrib for attrib in attribs_required if not attrib in row.keys()]
attribs_obsolete = [attrib for attrib in validaton_attributes if
(not attrib in attribs_required) and (attrib in row.keys())]
attribs = [attrib for attrib in attribs_required if not attrib in attribs_missing]
for key, value in {'missend':attribs_missing,'overbodig':attribs_obsolete}.items():
if len(value) > 0:
valid_errors['internalLocation'] += [int_loc]
valid_errors['start'] += [row['START']]
valid_errors['eind'] += [row['EIND']]
valid_errors['internalParameters'] += [",".join(int_pars)]
valid_errors['fout_type'] += [key]
valid_errors['fout_beschrijving'] += [",".join(value)]
for validation_rule in validation_rules:
errors = {'fout_type':None,
'fout_beschrijving':[]}
param = validation_rule['parameter']
if any(re.match(param,int_par) for int_par in int_pars):
rule = validation_rule['extreme_values']
rule = sort_attribs(rule)
                # rules with only hmax/hmin
if all(key in ['hmax', 'hmin'] for key in rule.keys()):
for hmin, hmax in zip(rule['hmin'], rule['hmax']):
if all(attrib in row.keys() for attrib in [hmin, hmax]):
if row[hmax] < row[hmin]:
errors['fout_type'] = 'waarde'
errors['fout_beschrijving'] += [f"{hmax} < {hmin}"]
                # rules with soft + hard min/max
elif all(key in rule.keys() for key in ['hmax', 'smax', 'smin', 'hmin']):
hmax = rule['hmax'][0]
hmin = rule['hmin'][0]
for smin, smax in zip(rule['smin'], rule['smax']):
if all(attrib in row.keys() for attrib in [smin, smax]):
if row[smax] <= row[smin]:
errors['fout_type'] = 'waarde'
errors['fout_beschrijving'] += [f"{smax} <= {smin}"]
if row[hmax] < row[smax]:
errors['fout_type'] = 'waarde'
errors['fout_beschrijving'] += [f"{'hmax'} < {smax}"]
if row[smin] < row[hmin]:
errors['fout_type'] = 'waarde'
errors['fout_beschrijving'] += [f"{smin} < {hmin}"]
valid_errors['internalLocation'] += [row['LOC_ID']] * len(errors['fout_beschrijving'])
valid_errors['start'] += [row['START']] * len(errors['fout_beschrijving'])
valid_errors['eind'] += [row['EIND']] * len(errors['fout_beschrijving'])
valid_errors['internalParameters'] += [",".join(int_pars)] * len(errors['fout_beschrijving'])
valid_errors['fout_type'] += [errors['fout_type']] * len(errors['fout_beschrijving'])
valid_errors['fout_beschrijving'] += errors['fout_beschrijving']
consistency_df['validation error'] = pd.DataFrame(valid_errors)
consistency_df['validation error'] = consistency_df['validation error'].drop_duplicates()
# include in the summary
if len(consistency_df['validation error']) == 0:
logging.info('er zijn geen foute/missende validatieregels')
else:
logging.warning('{} validatieregels zijn fout/missend'.format(len(consistency_df['validation error'])))
#%% check external parameter mapping
logging.info('regex externalParameters')
par_errors = {'internalLocation':[],
'internalParameter':[],
'externalParameter':[],
'fout':[]
}
internal_parameters = [mapping['internal'] for mapping in config['parameter_mapping']]
for idx, row in idmap_df.iterrows():
error = None
ext_par = None
ext_par = [mapping['external'] for mapping in config['parameter_mapping'] if
re.match(f'{mapping["internal"]}[0-9]',row['internalParameter'])]
if ext_par:
if not any(re.match(par,row['externalParameter']) for par in ext_par):
error = 'parameter mismatch'
else:
error = 'pars niet opgenomen in config'
if error:
par_errors['internalLocation'].append(row['internalLocation'])
par_errors['internalParameter'].append(row['internalParameter'])
par_errors['externalParameter'].append(row['externalParameter'])
par_errors['fout'].append(error)
consistency_df['par mismatch'] = pd.DataFrame(par_errors)
# include in the summary
if len(consistency_df['par mismatch']) == 0:
logging.info('geen regex fouten op interne en externe parameters')
else:
logging.warning('{} regex fouten op interne en externe parameters'.format(len(consistency_df['par mismatch'])))
#%% validate locationSets
logging.info('validatie locatieSets')
loc_set_errors = {'locationId':[],
'caw_code':[],
'caw_name':[],
'csv_file':[],
'location_name':[],
'type':[],
'functie': [],
'name_error':[],
'caw_name_inconsistent':[],
'missing_in_map':[],
'missing_in_set':[],
'missing_peilschaal':[],
'missing_hbov':[],
'missing_hben':[],
'missing_hbovps':[],
'missing_hbenps':[],
'missing_hloc':[],
'xy_not_same':[]}
sets = {'waterstandlocaties':'WATERSTANDLOCATIES',
'sublocaties': 'KUNSTWERKEN',
'hoofdlocaties': 'KUNSTWERKEN'}
'''
ToDo:
missing_in_set:
- check in which set a location should be and whether that matches the section?
- check whether a location is missing from the locationSet; temporarily remove one from the csv to test.
'''
for set_name,section_name in sets.items():
logging.info(set_name)
location_set = location_sets[set_name]
location_gdf = location_set['gdf']
csv_file = fews_config.locationSets[location_set['id']]['csvFile']['file']
int_locs = []
for idmap in ['IdOPVLWATER', 'IdOPVLWATER_HYMOS']:
for section in idmap_sections[idmap][section_name]:
int_locs += [item['internalLocation'] for item in xml_to_dict(fews_config.IdMapFiles[idmap],**section)['idMap']['map']]
if set_name == 'sublocaties':
int_locs = [loc for loc in int_locs if not loc[-1] == '0']
par_gdf = location_sets['hoofdlocaties']['gdf']
elif set_name == 'hoofdlocaties':
int_locs = [loc for loc in int_locs if loc[-1] == '0']
#idx, row = list(location_gdf.iterrows())[0]
for idx, row in list(location_gdf.iterrows()):
error = {'name_error':False,
'caw_name_inconsistent':False,
'missing_in_map':False,
'type':'',
'functie':'',
'missing_in_set':False,
'missing_peilschaal':False,
'missing_hbov':False,
'missing_hben':False,
'missing_hbovps':False,
'missing_hbenps':False,
'missing_hloc':False,
'xy_not_same':False}
loc_id = row['LOC_ID']
caw_code = loc_id[2:-2]
loc_name = row['LOC_NAME']
caw_name = ''
if set_name == 'sublocaties':
loc_functie = row['FUNCTIE']
sub_type = row['TYPE']
if sub_type in ['afsluiter', 'debietmeter', 'krooshek', 'vispassage']:
if not re.match(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}',loc_name):
error['name_error'] = True
else:
if not re.match(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}[0-9]_{loc_functie}',loc_name):
error['name_error'] = True
if not error['name_error']:
caw_name = re.match(f'([A-Z0-9 ]*)_',loc_name).group(1)
if not all(location_gdf[location_gdf['LOC_ID'].str.match(
f'..{caw_code}')]['LOC_NAME'].str.match(
f'({caw_name}_{caw_code}-K)')
):
error['caw_name_inconsistent'] = True
if not row['HBOV'] in location_sets['waterstandlocaties']['gdf']['LOC_ID'].values:
error['missing_hbov'] = True
if not row['HBEN'] in location_sets['waterstandlocaties']['gdf']['LOC_ID'].values:
error['missing_hben'] = True
if not row['HBOVPS'] in location_sets['peilschalen']['gdf']['LOC_ID'].values:
error['missing_hbovps'] = True
if not row['HBENPS'] in location_sets['peilschalen']['gdf']['LOC_ID'].values:
error['missing_hbenps'] = True
if not row['PAR_ID'] in location_sets['hoofdlocaties']['gdf']['LOC_ID'].values:
error['missing_hloc'] = True
else:
if not any([re.match(loc,loc_id) for loc in xy_ignore_df['internalLocation']]):
if not par_gdf[par_gdf['LOC_ID'] == row['PAR_ID']]['geometry'].values[0].equals(row['geometry']):
error['xy_not_same'] = True
if any(error.values()):
error['type'] = sub_type
error['functie'] = loc_functie
elif set_name == 'hoofdlocaties':
if not re.match(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*',loc_name):
error['name_error'] = True
elif set_name == 'waterstandlocaties':
if not re.match(f'[A-Z0-9 ]*_{caw_code}-w_.*',loc_name):
error['name_error'] = True
if not error['name_error']:
caw_name = re.match(f'([A-Z0-9 ]*)_',loc_name).group(1)
if not all(location_gdf[location_gdf['LOC_ID'].str.match(
f'..{caw_code}')]['LOC_NAME'].str.match(
f'({caw_name}_{caw_code}-w)')
):
error['caw_name_inconsistent'] = True
if not row['PEILSCHAAL'] in location_sets['peilschalen']['gdf']['LOC_ID'].values:
error['missing_peilschaal'] = True
if not loc_id in int_locs:
error['missing_in_map'] = True
if any(error.values()):
loc_set_errors['locationId'].append(loc_id)
loc_set_errors['caw_name'].append(caw_name)
loc_set_errors['caw_code'].append(caw_code)
loc_set_errors['csv_file'].append(csv_file)
loc_set_errors['location_name'].append(loc_name)
for key, value in error.items():
loc_set_errors[key].append(value)
# miss_locs = [loc for loc in int_locs if not loc in location_set['gdf']['LOC_ID'].values]
# #for loc_id in miss_locs:
# loc_set_errors['locationId'].append(miss_locs)
# loc_set_errors['csv_file'].append([csv_file] * len(miss_locs))
# loc_set_errors['location_name'].append([''] * len(miss_locs))
# loc_set_errors['missing_in_set'].append([True] * len(miss_locs))
# for key in ['loc_name_error','missing_in_map','missing_peilschaal']:
# loc_set_errors[key].append([False] * len(miss_locs))
consistency_df['locSet error'] = pd.DataFrame(loc_set_errors)
# include in the summary
if len(consistency_df['locSet error']) == 0:
logging.info('geen fouten in locationSets')
else:
logging.warning('{} fouten in locationSets'.format(len(consistency_df['locSet error'])))
#%% write to excel
inhoudsopgave = consistency_df['inhoudsopgave']
inhoudsopgave.index = inhoudsopgave['werkblad']
summary = {key:len(df) for key, df in consistency_df.items() if key in warning_sheets}
# read the input xlsx and drop everything except the fixed_sheets
book = load_workbook(consistency_out_xlsx)
for worksheet in book.worksheets:
if not worksheet.title in fixed_sheets:
book.remove(worksheet)
# add the summary sheet
worksheet = book.create_sheet('samenvatting',1)
worksheet.sheet_properties.tabColor = '92D050'
worksheet.append(['controle','aantal','beschrijving'])
for cell in worksheet['{}'.format(worksheet.max_row)]:
cell.font = Font(bold=True)
for key, value in summary.items():
worksheet.append([key,value,inhoudsopgave.loc[key]['beschrijving']])
if value > 0:
worksheet[worksheet.max_row][1].fill = PatternFill(fgColor='FF0000', fill_type='solid')
else:
worksheet[worksheet.max_row][1].fill = PatternFill(fgColor='92D050', fill_type='solid')
worksheet.column_dimensions['A'].width=40
worksheet.column_dimensions['C'].width = 100
worksheet.auto_filter.ref = worksheet.dimensions
xls_writer = pd.ExcelWriter(consistency_out_xlsx, engine='openpyxl')
xls_writer.book = book
for sheet_name, df in consistency_df.items():
if (not sheet_name in fixed_sheets) & (not df.empty):
if df.index.name == None:
df.to_excel(xls_writer, sheet_name=sheet_name, index=False)
else:
df.to_excel(xls_writer, sheet_name=sheet_name, index=True)
worksheet = xls_writer.sheets[sheet_name]
for col in worksheet.columns:
worksheet.column_dimensions[col[0].column_letter].width = 20
worksheet.auto_filter.ref = worksheet.dimensions
worksheet.freeze_panes = worksheet['B2']
if not df.empty:
if (sheet_name in warning_sheets):
worksheet.sheet_properties.tabColor = 'FF0000'
else:
worksheet.sheet_properties.tabColor = '92D050'
xls_writer.book.active = xls_writer.book['samenvatting']
xls_writer.save()
#%% update the csv files
def update_date(row,mpt_df,date_threshold):
    ''' write the start and end date '''
int_loc = row['LOC_ID']
if int_loc in mpt_df.index:
start_date = mpt_df.loc[int_loc]['STARTDATE'].strftime('%Y%m%d')
end_date = mpt_df.loc[int_loc]['ENDDATE']
if end_date > date_threshold:
end_date = pd.Timestamp(year=2100, month=1, day=1)
end_date = end_date.strftime('%Y%m%d')
else:
start_date = row['START']
end_date = row['EIND']
return start_date, end_date
def update_histtag(row,grouper):
    ''' assign the most recent histTag to a water-level location '''
return next((df.sort_values('total_max_end_dt', ascending=False)['serie'].values[0]
for loc_id, df
in grouper
if loc_id == row['LOC_ID']),None)
def update_peilschaal(row):
    ''' assign the upstream and downstream staff gauges (peilschalen) to sublocations '''
result = {'HBOV':'','HBEN':''}
for key in result.keys():
df = waterstandloc_gdf.loc[waterstandloc_gdf['LOC_ID'] == row[key]]
if not df.empty:
result[key] = df['PEILSCHAAL'].values[0]
return result['HBOV'], result['HBEN']
#def update_types(row):
date_threshold = mpt_df['ENDDATE'].max() - | pd.Timedelta(weeks=26) | pandas.Timedelta |
#-*-coding:utf-8-*-
import numpy as np
import pandas as pd
import time
from bayes_smoothing import *
from sklearn.preprocessing import LabelEncoder
import copy
def roll_browse_fetch(df, column_list):
print("==========================roll_browse_fetch ing==============================")
    df = df.sort_values('context_timestamp')
df['tmp_count'] = df['status']
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_browse' %c] = df.groupby(pair)['tmp_count'].cumsum()
del df['tmp_count']
return df
def roll_click_fetch(df, column_list):
print("==========================roll_click_fetch ing==============================")
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_click' %c] = df.groupby(pair)['is_trade'].cumsum()
df['%s_click' %c] = df['%s_click' %c]-df['is_trade']
return df
def roll_rate_fetch(df, column_list):
df = roll_browse_fetch(df,column_list)
df = roll_click_fetch(df,column_list)
print("==========================roll_rate_fetch ing==============================\n")
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_rate' %c] = bs_utilize(df['%s_browse' %c], df['%s_click' %c])
# del df['%s_browse' %c]
return df
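# Illustrative call of the rolling-rate features above; the column names are
# assumptions about this dataset, not taken from this file:
# data = roll_rate_fetch(data, ['user_id', 'item_id', ('user_id', 'item_id')])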
#=================================== conversion rate per day ==============================
def roll_browse_day(df, column_list):
    df = df.sort_values('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_browse".format(c)] = df_temp["{}_day_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day(df, column_list):
    df = df.sort_values('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_click".format(c)] = df_temp["{}_day_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day(df,column_list)
df =roll_click_day(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_rate' %c] = bs_utilize(df['%s_day_browse' %c], df['%s_day_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== conversion rate per day and hour ==============================
def roll_browse_day_hour(df, column_list):
    df = df.sort_values('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_hour_browse".format(c)] = df_temp["{}_day_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day_hour(df,column_list):
    df = df.sort_values('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_hour_click".format(c)] = df_temp["{}_day_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day_hour(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day_hour(df,column_list)
df =roll_click_day_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_hour_rate' %c] = bs_utilize(df['%s_day_hour_browse' %c], df['%s_day_hour_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== conversion rate per hour ==============================
def roll_browse_hour(df, column_list):
    df = df.sort_values('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_hour_browse".format(c)] = df_temp["{}_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_hour(df,column_list):
    df = df.sort_values('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_hour_click".format(c)] = df_temp["{}_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_hour(df,column_list):
print("==========================roll_rate_hour ing==============================")
df = roll_browse_hour(df,column_list)
df =roll_click_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_hour_rate' %c] = bs_utilize(df['%s_hour_browse' %c], df['%s_hour_click' %c])
return df
def label_encoding(df, columns):
for c in columns:
le = LabelEncoder()
df[c] = le.fit_transform(df[c])
return df
# # #---------------- statistical features -----------------
# def get_last_diff_statistic(data,col_list, n_last_diff):
# print("=======get_last_diff============\n")
# data_temp = data
# col_id = col_list[0],col_list[1]
# data = data.sort_values([col_id, 'timestamp'])
# data['next_id'] = data[col_id].shift(-1)
# data['next_actionTime'] = data.timestamp.shift(-1)
# data = data.loc[data.next_id == data[col_id]].copy()
# data['action_diff'] = data['next_actionTime'] - data['timestamp']
# if n_last_diff is not None:
# df_n_last_diff = data.groupby(col_id, as_index=False).tail(n_last_diff).copy()
# df_last_diff_statistic = df_n_last_diff.groupby(col_id, as_index=False).action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# else:
# grouped_user = data.groupby(col_id, as_index=False)
# n_last_diff = 'all'
# df_last_diff_statistic = grouped_user.action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# res_data = pd.merge(data_temp,df_last_diff_statistic,how="left",on = col_id)
# return res_data
# #----------------------- time features -----------------------
# # #-- time-gap features
# def chafen(df):
# return pd.DataFrame(np.diff(df,axis = 0))
# def get_last_diff(data, col_list,n_last_diff):
#     """Time gaps between the last n_last_diff actions."""
# print("=======get_last_diff============\n")
# for col in col_list:
# col_sort = col.copy()
# col_sort.append('timestamp')
# data = data.sort_values(col_sort,ascending = False)
# data_temp = data.groupby(col)['timestamp'].apply(chafen).reset_index()
# data_temp.columns = [col[0],col[1],'level','time_gap']
# data_temp = data_temp.loc[data_temp.level<n_last_diff]
# data_temp['time_gap'] = -1*data_temp['time_gap']
# data_temp['level'] = str(col[0])+"_"+str(col[1])+"_last_time_gap"+ data_temp['level'].astype('str')
# data_temp = pd.pivot_table(data_temp,index=[col[0],col[1]],values='time_gap',columns='level').reset_index()
# res_data = pd.merge(data,data_temp,how="left",on = [col[0],col[1]])
# return res_data
#-- time-gap features
def time_diff_feat(data,col_list):
print("get tiem diff...")
for col in col_list:
col_sort = copy.copy(col)
col_sort.append('timestamp')
        data_temp = data.sort_values(col_sort, ascending=True)
data_temp['{}_{}_time_diff'.format(col[0],col[1])] = data_temp.groupby(col)['timestamp'].apply(lambda x:x.diff())
data['{}_{}_time_diff'.format(col[0],col[1])] = data_temp['{}_{}_time_diff'.format(col[0],col[1])].fillna(0)
return data
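# Illustrative call; the id pairs below are assumptions about this dataset:
# data = time_diff_feat(data, [['user_id', 'item_id'], ['user_id', 'shop_id']])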
def CombinationFeature(data):
print("==============convert_data===============")
data['tm_hour'] = data['hour'] + data['min']/60
data['tm_hour_sin'] = data['tm_hour'].map(lambda x:np.sin((x-12)/24*2*np.pi))
data['tm_hour_cos'] = data['tm_hour'].map(lambda x:np.cos((x-12)/24*2*np.pi))
data_time=data[['user_id','day','hour','min']]
user_query_day = data.groupby(['user_id', 'day']).size().reset_index().rename(columns={0: 'user_query_day'})
user_query_day_hour = data.groupby(['user_id', 'day', 'hour']).size().reset_index().rename(columns={0: 'user_query_day_hour'})
user_query_day_hour_min = data.groupby(['user_id', 'day', 'hour','min']).size().reset_index().rename(columns={0: 'user_query_day_hour_min'})
user_query_day_hour_min_sec = data.groupby(['user_id', 'day', 'hour','min','sec']).size().reset_index().rename(columns={0: 'user_query_day_hour_min_sec'})
user_day_hourmin_mean= data_time.groupby(['user_id', 'day']).mean().reset_index().rename(columns={'hour': 'mean_hour','min':'mean_minuite'})
user_day_hourmin_std= data_time.groupby(['user_id', 'day']).std().reset_index().rename(columns={'hour': 'std_hour','min':'std_minuite'})
user_day_hourmin_max= data_time.groupby(['user_id', 'day']).max().reset_index().rename(columns={'hour': 'max_hour','min':'max_minuite'})
user_day_hourmin_min= data_time.groupby(['user_id', 'day']).min().reset_index().rename(columns={'hour': 'min_hour','min':'min_minuite'})
#-------merge-----
data = pd.merge(data, user_query_day, 'left', on=['user_id', 'day'])
data = pd.merge(data, user_query_day_hour, 'left',on=['user_id', 'day', 'hour'])
data = pd.merge(data, user_query_day_hour_min, 'left',on=['user_id', 'day', 'hour','min'])
data = pd.merge(data, user_query_day_hour_min_sec, 'left',on=['user_id', 'day', 'hour','min','sec'])
data = pd.merge(data, user_day_hourmin_mean, 'left',on=['user_id','day'])
data = | pd.merge(data, user_day_hourmin_std, 'left',on=['user_id','day']) | pandas.merge |
#!/usr/bin/env python3
import pandas as pd
import sys
from math import ceil
import argparse
QUERY_SET = 'validation'
MAX_REL_ERR = 0.1
def extract_params(df, ds, mu, algo, k_equals_m, timeout):
# only allow instances whose runtime is at most a factor of timeout away from naive
timeout = ceil(df[(df['dataset'] == ds) & (df['algorithm'] == 'naive')]['query_time'].max())*timeout
# print(ds,mu,algo)
df = df[(df['dataset'] == ds) & \
(df['mu'] == mu) & \
(df['algorithm'] == algo) & \
(df['rel_err'] < MAX_REL_ERR) & \
(df['query_time'] < timeout)]
if k_equals_m and algo in ['ann-faiss', 'ann-permuted-faiss']:
df = df.copy()
df['k'] = df['params'].str.split('_').map(lambda x: x[1].strip('[]')).str.split(', ').map(lambda x: x[0]).astype(int)
df['m'] = df['params'].str.split('_').map(lambda x: x[1].strip('[]')).str.split(', ').map(lambda x: x[1]).astype(int)
df = df[df['k'] == df['m']]
df = df.sort_values(by='query_time')
if df.shape[0] == 0:
return None
df = df.iloc[0]
# print(df)
# if df['algorithm'] in ['ann-permuted-faiss', 'ann-faiss']:
# params = list(map(lambda s: int(s.split('=')[1]),
# df['params'].strip('()').split(', ')))
# params = dict(zip(['k','m','nlist','nprobe'], params))
# elif df['algorithm'] in ['random-sampling', 'rsp']:
# params = { 'm' : int(df['params']) }
# elif df['algorithm'] == 'naive':
# params = {}
# elif df['algorithm'] in ['rs','hbe']:
# params = list(map(lambda s: float(s.split('=')[1]),
# df['params'].strip('()').split(', ')))
# params = dict(zip(['eps', 'tau'], params))
# elif df['algorithm'] in ['sklearn-balltree', 'sklearn-kdtree']:
# # print(df['params'])
# params = df['params'].strip('()').split(', ')
# params = (int(params[0].split('=')[1]), float(params[1].split('=')[1]),
# float(params[2].split('=')[1]))
# params = dict(zip(['ls', 'atol', 'rtol'], params))
# # print(df['params'])
# # print(df['algorithm'])
return df['params']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--k-equals-m',
action='store_true')
parser.add_argument('--timeout',
type=int,
default=10)
parser.add_argument('filename',
type=str,
metavar='results.csv')
args = parser.parse_args()
filename = args.filename
k_equals_m = args.k_equals_m
timeout = args.timeout
df = | pd.read_csv(filename) | pandas.read_csv |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import json
import requests
import io
url11='https://www.boxofficemojo.com/weekend/by-year/2019/?area=AU'
url12='https://www.boxofficemojo.com/weekend/by-year/2020/?area=AU'
url21='https://www.boxofficemojo.com/weekend/by-year/2019/?area=DE'
url22='https://www.boxofficemojo.com/weekend/by-year/2020/?area=DE'
url31='https://www.boxofficemojo.com/weekend/by-year/2019/?area=JP'
url32='https://www.boxofficemojo.com/weekend/by-year/2020/?area=JP'
url41='https://www.boxofficemojo.com/weekend/by-year/2019/'
url42='https://www.boxofficemojo.com/weekend/by-year/2020/'
#Australia
dates=[]
dfs1=pd.read_html(url11)
dfs2=pd.read_html(url12)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total1=pd.DataFrame()
df110=dfs1[0]['Overall Gross'][29::-1]
df12=dfs1[0]['Dates'][29::-1]
df210=dfs2[0]['Overall Gross'][:0:-1].replace(',','')
df22=dfs2[0]['Dates'][:0:-1]
k = []
for i in df110:
k.append(int((i.replace('$','').replace(',',''))))
df11['Overall Gross']=k
k = []
for i in df210:
k.append(int((i.replace('$','').replace(',',''))))
df21['Overall Gross']=k
for i in range(0,42):
dates.append((datetime.datetime.strptime('2019-06-06','%Y-%m-%d')+datetime.timedelta(days=7*i)).date())
dates.append('2020-03-28')
dates.append('2020-06-04')
total1['Dates']=dates
total1['Overall Gross']=pd.concat([df11,df21],ignore_index=True)
print(total1)
total1.to_csv(r'C:/Users/USER/Desktop/資訊/鄭恆安/csv/Australia.csv',encoding='big5',index=False)
#Germany
dates=[]
dfs1=pd.read_html(url21)
dfs2=pd.read_html(url22)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total2=pd.DataFrame()
df110=dfs1[0]['Overall Gross'][29::-1]
df12=dfs1[0]['Dates'][29::-1]
df210=dfs2[0]['Overall Gross'][:0:-1].replace(',','')
df22=dfs2[0]['Dates'][:0:-1]
k = []
for i in df110:
k.append(int((i.replace('$','').replace(',',''))))
df11['Overall Gross']=k
k = []
for i in df210:
k.append(int((i.replace('$','').replace(',',''))))
df21['Overall Gross']=k
for i in range(0,42):
dates.append((datetime.datetime.strptime('2019-06-06','%Y-%m-%d')+datetime.timedelta(days=7*i)).date())
dates.append('2020-04-09')
dates.append('2020-05-21')
dates.append('2020-05-28')
dates.append('2020-06-04')
total2['Dates']=dates
total2['Overall Gross']=pd.concat([df11,df21],ignore_index=True)
print(total2)
total2.to_csv(r'C:/Users/USER/Desktop/資訊/鄭恆安/csv/Germany.csv',encoding='big5',index=False)
#Japan
dates=[]
dfs1=pd.read_html(url31)
dfs2=pd.read_html(url32)
df11=pd.DataFrame()
df12=pd.DataFrame()
df21=pd.DataFrame()
df22=pd.DataFrame()
total= | pd.DataFrame() | pandas.DataFrame |
__all__ = ['SOURCE_URL', 'NP', 'PJM', 'BE', 'FR', 'DE', 'EPFInfo', 'EPF']
import os
if not os.path.exists('./results/'):
os.makedirs('./results/')
from dataclasses import dataclass
from datetime import timedelta
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
from .utils import download_file, Info, TimeSeriesDataclass
# Cell
SOURCE_URL = 'https://sandbox.zenodo.org/api/files/da5b2c6f-8418-4550-a7d0-7f2497b40f1b/'
# Cell
@dataclass
class NP:
test_date: str = '2016-12-27'
name: str = 'NP'
@dataclass
class PJM:
test_date: str = '2016-12-27'
name: str = 'PJM'
@dataclass
class BE:
test_date: str = '2015-01-04'
name: str = 'BE'
@dataclass
class FR:
test_date: str = '2015-01-04'
name: str = 'FR'
@dataclass
class DE:
test_date: str = '2016-01-04'
name: str = 'DE'
# Cell
EPFInfo = Info(groups=('NP', 'PJM', 'BE', 'FR', 'DE'),
class_groups=(NP, PJM, BE, FR, DE))
# Cell
class EPF:
@staticmethod
def load(directory: str,
group: str) -> Tuple[pd.DataFrame,
Optional[pd.DataFrame],
Optional[pd.DataFrame]]:
"""
Downloads and loads EPF data.
Parameters
----------
directory: str
Directory where data will be downloaded.
group: str
Group name.
Allowed groups: 'NP', 'PJM', 'BE', 'FR', 'DE'.
"""
path = Path(directory) / 'epf' / 'datasets'
EPF.download(directory)
class_group = EPFInfo.get_group(group)
file = path / f'{group}.csv'
df = pd.read_csv(file)
df.columns = ['ds', 'y'] + \
[f'Exogenous{i}' for i in range(1, len(df.columns) - 1)]
df['unique_id'] = group
df['ds'] = pd.to_datetime(df['ds'])
df['week_day'] = df['ds'].dt.dayofweek
dummies = pd.get_dummies(df['week_day'], prefix='day')
df = pd.concat([df, dummies], axis=1)
dummies_cols = [col for col in df if col.startswith('day')]
Y = df.filter(items=['unique_id', 'ds', 'y'])
X = df.filter(items=['unique_id', 'ds', 'Exogenous1', 'Exogenous2', 'week_day'] + \
dummies_cols)
return Y, X, None
@staticmethod
def load_groups(directory: str,
groups: List[str]) -> Tuple[pd.DataFrame,
Optional[pd.DataFrame],
Optional[pd.DataFrame]]:
"""
Downloads and loads panel of EPF data
according of groups.
Parameters
----------
directory: str
Directory where data will be downloaded.
groups: List[str]
Group names.
Allowed groups: 'NP', 'PJM', 'BE', 'FR', 'DE'.
"""
Y = []
X = []
for group in groups:
Y_df, X_df, S_df = EPF.load(directory=directory, group=group)
Y.append(Y_df)
X.append(X_df)
Y = | pd.concat(Y) | pandas.concat |
import pandas
import vcf
from pymongo import MongoClient
class FileWriter(object):
#TODO: Tests for new classes.
def __init__(self, db_name, collection_name):
self.collection_name = collection_name
self.db_name = db_name
def generate_unfiltered_annotated_csv(self, filepath):
"""
:param list_dictionaries: list of annotated variants in dictionary
:param filepath: filpath (including name of output file) to which the output will be written
:return: annotated csv file
"""
client = MongoClient()
db = getattr(client, self.db_name)
collection = getattr(db, self.collection_name)
all_my_data = list(collection.find({}))
df = | pandas.DataFrame(all_my_data) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import difflib
TMCF_ISOCODE = """Node: E:{dataset_name}->E0
typeOf: dcs:AdministrativeArea2
lgdCode: C:{dataset_name}->lgdCode
"""
TMCF_NODES = """
Node: E:{dataset_name}->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:indianNHM/{statvar}
measurementMethod: dcs:NHM_HealthInformationManagementSystem
observationAbout: C:{dataset_name}->E0
observationDate: C:{dataset_name}->Date
observationPeriod: "P1Y"
value: C:{dataset_name}->{statvar}
"""
MCF_NODES = """Node: dcid:indianNHM/{statvar}
description: "{description}"
typeOf: dcs:StatisticalVariable
populationType: schema:Person
measuredProperty: dcs:indianNHM/{statvar}
statType: dcs:measuredValue
"""
class NHMDataLoaderBase(object):
"""
An object to clean .xls files under 'data/' folder and convert it to csv
Attributes:
data_folder: folder containing all the data files
dataset_name: name given to the dataset
cols_dict: dictionary containing column names in the data files mapped to StatVars
(keys contain column names and values contains StatVar names)
"""
def __init__(self, data_folder, dataset_name, cols_dict, clean_names,
final_csv_path):
"""
Constructor
"""
self.data_folder = data_folder
self.dataset_name = dataset_name
self.cols_dict = cols_dict
self.clean_names = clean_names
self.final_csv_path = final_csv_path
self.raw_df = None
# Ignoring the first 3 elements (State, isoCode, Date) in the dictionary map
self.cols_to_extract = list(self.cols_dict.keys())[3:]
def generate_csv(self):
"""
        Class method to preprocess the data file for each available year,
        extract the columns and map the columns to schema.
        The dataframe is saved as a csv file in the current directory.
Returns:
pandas dataframe: Cleaned dataframe.
"""
df_full = pd.DataFrame(columns=list(self.cols_dict.keys()))
lgd_url = 'https://india-local-government-directory-bixhnw23da-el.a.run.app/india-local-government-directory/districts.csv?_size=max'
self.dist_code = | pd.read_csv(lgd_url, dtype={'DistrictCode': str}) | pandas.read_csv |
import pandas as pd # type: ignore
import openpyxl as opxl # type: ignore
import csv
import pickle
import numpy as np # type: ignore
import datetime
import random
import os
class PlateCombinatorics:
"""
Class to handle plate combinatorics. Methods to return variations of plate values
based on common OCR errors.
"""
_plate: str = ''
_result_list: list = []
_ocr_dict: dict = {'O': 'Q', 'Q': 'O', '8': 'B', 'B': '8', '1': 'I',
'I': '1', 'A': '4', '4': 'A', 'D': 'O', 'G': '6',
'6': 'G', 'S': '5', '5': 'S'}
def __init__(self, plate=''):
self._plate = plate
self._result_list = []
def set_plate(self, plate: str):
"""
Set plate value
:param plate: plate value
"""
if not isinstance(plate, str):
raise TypeError()
self._plate = plate
def get_plate(self) -> str:
"""
:return: plate value
"""
return self._plate
def get_plate_combinations(self) -> list:
"""
:return: return list of plate combinations
"""
if self._plate == '' or self._plate is None:
raise ValueError('plate value blank')
self._result_list.append(self._plate)
return self.__plate_combinations(self._plate, 0, self._result_list)
@staticmethod
def __update_name(old_name: str, new_char: str, index: int) -> str:
"""
Update input string with new character value at specified index
:param old_name: str of original name
:param new_char: replacement character
:param index: index to replace character
:return: updated str
"""
if index == 0:
return new_char + old_name[1:]
elif index == len(old_name) - 1:
return old_name[:index] + new_char
else:
new_name = old_name[:index] + new_char + old_name[index + 1:]
return new_name
def __plate_combinations(self, plate: str, index: int, result_list: list):
"""
Recursive method to find all permutations of a plate
:param plate: str of plate
:param index: int of index
:param result_list: list of plate permutations
:return: list of plate permutations
"""
if index < len(plate):
if plate[index] in self._ocr_dict:
new_plate = self.__update_name(plate, self._ocr_dict[plate[index]], index)
self._result_list.append(new_plate)
self.__plate_combinations(new_plate, index + 1, result_list)
self.__plate_combinations(plate, index + 1, result_list)
return result_list
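# Illustrative usage (the plate value is made up):
# combos = PlateCombinatorics('AB1234').get_plate_combinations()
# -> the original plate plus common OCR swaps such as '4B1234' and 'A81234'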
class TransactionFile:
"""
Class to process transaction files from Kapsch toll system. Can process
both excel and csv files, excel files need to be in xlsx or xlsm format.
"""
_filename: str = ''
_df = None
_header_row: int = -1
_sheet_name: str = ''
_input_is_csv: bool = False
_sheet_names_list: list = ['transaction', 'Transaction', 'trans', 'TrxnDetail',
'Sheet1', 'tran', 'trip', 'TripTxn']
_header_values: list = ['Trx ID', 'CSC Lane']
_ocr_header_names: list = ['Ocr Info', 'Plate Info']
_tag_header_names: list = ['Number']
_agency_header_names: list = ['Ag']
_excel_file_types: list = ['xlsx', 'xlsm']
_payment_header_names: list = ['Pmnt Type']
_trx_id_names: list = ['Trx ID']
def __init__(self, filename: str):
self._filename = filename
print('Processing: ' + filename)
if filename.split('.')[1] in self._excel_file_types:
self.__process_excel_file()
elif filename.split('.')[1] == 'csv':
self.__process_csv_file()
self._input_is_csv = True
else:
raise TypeError('Input input, must be xlsx or csv')
self.__create_tag_fields()
self.__create_plate_field()
self.__create_trx_id_field()
def __create_trx_id_field(self):
complete: bool = False
for i in self._trx_id_names:
if complete:
break
try:
self._df['TRX_ID'] = self._df[i]
complete = True
except KeyError:
continue
if not complete:
raise ValueError('Missing TrxID field')
def __create_plate_field(self):
columns = self._df.columns
for i in self._ocr_header_names:
if i in columns:
try:
self._df['PLATE'] = self._df[i].str.split(pat='-', expand=True)[0]
except AttributeError:
self._df['PLATE'] = ''
def to_csv(self):
"""
Output dataframe to csv
"""
if self._input_is_csv:
raise ValueError('Input file is a csv file')
out_filename = self._filename.split('.')[0] + '.csv'
self._df.to_csv(out_filename)
def __select_worksheet(self, workbook: opxl):
"""
Select worksheet from all worksheets. Uses sheet_names_list
:param workbook: Workbook to search
"""
for sheet in workbook:
for i in self._sheet_names_list:
if i in sheet.title:
self._sheet_name = sheet.title
def __set_excel_header_row(self, wb: opxl):
"""
Finds header row in workbook. Uses header_values for search.
:param wb: workbook
"""
ws: opxl = wb[self._sheet_name]
header_row: int = 0
found: bool = False
for row in ws:
if found:
break
for cell in row:
if cell.value in self._header_values:
found = True
self._header_row = header_row
break
header_row += 1
def __process_excel_file(self):
"""
Process excel data file info dataframe
"""
wb: opxl = opxl.load_workbook(filename=self._filename, read_only=True)
self.__select_worksheet(wb)
self.__set_excel_header_row(wb)
self._df = pd.read_excel(self._filename, sheet_name=self._sheet_name,
skiprows=self._header_row)
def __create_tag_fields(self):
columns = self._df.columns
for i in self._tag_header_names:
if i in columns:
self._df['TAG_ID'] = self._df[i]
for i in self._agency_header_names:
if i in columns:
self._df['AG'] = self._df[i]
def get_df(self) -> pd.DataFrame:
"""
:return: dataframe
"""
return self._df
def __get_csv_header(self):
with open(self._filename) as csvfile:
reader = csv.reader(csvfile)
header_row: int = 0
row_found: bool = False
for row in reader:
if row_found:
break
for value in row:
                    if value in self._header_values:
                        self._header_row = header_row
                        row_found = True
                header_row += 1
def __process_csv_file(self):
"""
Wrapper method to identify csv header row and read as
pandas dataframe.
"""
self.__get_csv_header()
self._df = pd.read_csv(self._filename, skiprows=self._header_row, low_memory=False)
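# Illustrative usage (file names are made up):
# trxn_df = TransactionFile('transactions.xlsx').get_df()
# trip_df = TripFile('trips.csv').get_df()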
class TripFile(TransactionFile):
_header_values = ['id', 'dt', 'lane', 'agency', 'Plaza']
_ocr_header_names = ['plate', 'Review Type', 'Plate Info']
_tag_header_names = ['Ag-Tag', 'Prime']
def __init__(self, filename):
super(TripFile, self).__init__(filename)
self.__create_tag_fields()
def __create_tag_fields(self):
"""
overloaded method to create tag and agency fields for
toll trip files
"""
columns = self._df.columns
for i in self._tag_header_names:
if i in columns:
                tag_field: pd.DataFrame = self._df[i].str.split(pat='-', expand=True)
self._df['AG'] = pd.to_numeric(tag_field[0])
self._df['TAG_ID'] = | pd.to_numeric(tag_field[1]) | pandas.to_numeric |
import functools
import logging
from datetime import timedelta
import numpy as np
import pandas as pd
JHU_CSV_URL = "https://raw.githubusercontent.com/datasets/covid-19/main/data/countries-aggregated.csv"
UN_POPULATION_CSV_URL = "https://raw.githubusercontent.com/owid/covid-19-data/152b2236a32f889df3116c7121d9bb14ce2ff2a8/scripts/input/un/population_2020.csv"
def load_COVID_data(country, num_data_points=None):
# Population
population_df = pd.read_csv(
UN_POPULATION_CSV_URL,
keep_default_na=False,
usecols=["entity", "year", "population"],
)
population_df = population_df.loc[population_df["entity"] == country]
population_df = population_df.loc[population_df["year"] == 2020]
population = float(population_df["population"])
# COVID data
cases_df = pd.read_csv(JHU_CSV_URL)
cases_df["Date"] = | pd.to_datetime(cases_df["Date"]) | pandas.to_datetime |
__author__ = 'qchasserieau'
import networkx as nx
import pandas as pd
import shapely
from syspy.skims import skims
def linestring_geometry(row):
to_return = shapely.geometry.linestring.LineString(
[
[row['x_origin'], row['y_origin']],
[row['x_destination'], row['y_destination']]
]
)
return to_return
def point_geometry(row):
return shapely.geometry.point.Point(row['stop_lon'], row['stop_lat'])
def stop_clusters(
stops,
longitude='stop_lon',
latitude='stop_lat',
reach=150,
method='connected_components',
geometry=False
):
"""
Clusterizes a collection of stops to a smaller collection of centroids.
For a given station, every trip may be linked to a different entry in the
'stops' table of the transitfeed. This function may be used in order
to build the parents "stations" of these stops.
:param stops: transitfeed "stops" table where the id is used as index
:param longitude: name of the longitude column
:param latitude: name of the latitude column
:param reach: maximum length of the connections of the stops of a cluster
:param method: clustering method, connected components builds a station
for every connected components of a graph where every stop is linked
to its neighbors that are nearer than reach.
:param geometry: if True: the geometry of the centroids of the clusters
is added to the centroid table
:return: {
'transfers': transfers,
'centroids': centroids,
'clusters': cluster_list
}
"transfers" are the edges of the graph (the ones shorter than reach) ;
"centroids" contains the centroids' data ;
"clusters" joins the stops to the clusters ;
"""
stops[[longitude, latitude]] = stops[[longitude, latitude]].astype(float)
euclidean = skims.euclidean(
stops, latitude=latitude, longitude=longitude)
transfers = euclidean[euclidean['euclidean_distance'] < reach]
if method == 'connected_components':
g = nx.Graph(transfers[['origin', 'destination']].values.tolist())
components = list(nx.connected_components(g))
cluster_list = []
for i in range(len(components)):
for c in components[i]:
cluster_list.append({'stop_id': c, 'cluster_id': i})
centroids = pd.merge(
stops, pd.DataFrame(cluster_list),
left_index=True, right_on='stop_id'
).groupby(
'cluster_id')[[longitude, latitude]].mean()
if geometry:
transfers['geometry'] = transfers.apply(linestring_geometry, axis=1)
centroids['geometry'] = centroids.apply(point_geometry, axis=1)
return_dict = {
'transfers': transfers,
'centroids': centroids,
'clusters': cluster_list
}
return return_dict
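# Illustrative usage; `stops` is assumed to be a GTFS stops table with
# 'stop_id', 'stop_lon' and 'stop_lat' columns:
# clusters = stop_clusters(stops.set_index('stop_id'), reach=100, geometry=True)
# centroids = clusters['centroids']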
def stops_with_parent_station(stops, stop_cluster_kwargs={}, sort_column=None):
clusters = pd.DataFrame(
stop_clusters(
stops.set_index('stop_id'),
**stop_cluster_kwargs
)['clusters']
)
if sort_column:
sort = stops[['stop_id', sort_column]].copy()
clusters = pd.merge(
clusters, sort, on='stop_id').sort_values(sort_column)
parents = clusters.groupby('cluster_id')[['stop_id']].first()
# return parents
to_merge = pd.merge(
clusters,
parents,
left_on='cluster_id',
right_index=True,
suffixes=['', '_parent']
)
to_merge = to_merge.rename(
columns={
'stop_id_parent': 'parent_station',
}
)[['stop_id', 'parent_station']]
_stops = | pd.merge(stops, to_merge, on='stop_id', suffixes=['', '_merged']) | pandas.merge |
import configparser
import datetime as dt
import logging
import os
import shutil
from pathlib import Path
from urllib.error import URLError
import matplotlib.image as mplimg
import pandas as pd
import pkg_resources as pr
from . import stats
from .exceptions import NoFilesFoundError
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
pkg_name = __name__.split('.')[0]
configpath = Path.home() / ".{}.ini".format(pkg_name)
LOGGER = logging.getLogger(__name__)
def get_config():
"""Read the configfile and return config dict.
Returns
-------
dict
Dictionary with the content of the configpath file.
"""
if not configpath.exists():
raise IOError("Config file {} not found.".format(str(configpath)))
else:
config = configparser.ConfigParser()
config.read(str(configpath))
return config
def set_database_path(dbfolder):
"""Use to write the database path into the config.
Parameters
----------
dbfolder : str or pathlib.Path
Path to where planet4 will store clustering results by default.
"""
try:
d = get_config()
except IOError:
d = configparser.ConfigParser()
d['planet4_db'] = {}
d['planet4_db']['path'] = dbfolder
with configpath.open('w') as f:
d.write(f)
print("Saved database path into {}.".format(configpath))
def get_data_root():
d = get_config()
data_root = Path(d['planet4_db']['path']).expanduser()
data_root.mkdir(exist_ok=True, parents=True)
return data_root
def get_ground_projection_root():
d = get_config()
gp_root = Path(d['ground_projection']['path'])
gp_root.mkdir(exist_ok=True)
return gp_root
if not configpath.exists():
print("No configuration file {} found.\n".format(configpath))
savepath = input(
"Please provide the path where you want to store planet4 results:")
set_database_path(savepath)
else:
data_root = get_data_root()
def dropbox():
return Path.home() / 'Dropbox'
def p4data():
return dropbox() / 'data' / 'planet4'
def analysis_folder():
name = 'p4_analysis'
if p4data().exists():
path = p4data() / name
else:
path = dropbox() / name
return path
def check_and_pad_id(imgid):
"Does NOT work with pd.Series item."
if imgid is None:
return None
imgid_template = "APF0000000"
if len(imgid) < len(imgid_template):
imgid = imgid_template[:-len(imgid)] + imgid
return imgid
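# Example (illustrative): check_and_pad_id('123') returns 'APF0000123'.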
def get_subframe(url):
"""Download image if not there yet and return numpy array.
Takes a data record (called 'line'), picks out the image_url.
First checks if the name of that image is already stored in
the image path. If not, it grabs it from the server.
Then uses matplotlib.image to read the image into a numpy-array
and finally returns it.
"""
targetpath = data_root / 'images' / os.path.basename(url)
targetpath.parent.mkdir(exist_ok=True)
if not targetpath.exists():
LOGGER.info("Did not find image in cache. Downloading ...")
try:
path = urlretrieve(url)[0]
except URLError:
msg = "Cannot receive subframe image. No internet?"
LOGGER.error(msg)
return None
LOGGER.debug("Done.")
shutil.move(path, str(targetpath))
else:
LOGGER.debug("Found image in cache.")
im = mplimg.imread(targetpath)
return im
class P4DBName(object):
def __init__(self, fname):
self.p = Path(fname)
date = str(self.name)[:10]
self.date = dt.datetime(*[int(i) for i in date.split('-')])
def __getattr__(self, name):
"looking up things in the Path object if not in `self`."
return getattr(self.p, name)
def get_latest_file(filenames):
fnames = list(filenames)
if len(fnames) == 0:
raise NoFilesFoundError
retval = P4DBName(fnames[0])
dtnow = retval.date
for fname in fnames[1:]:
dt_to_check = P4DBName(fname).date
if dt_to_check > dtnow:
dtnow = dt_to_check
retval = P4DBName(fname)
return retval.p
def get_latest_cleaned_db(datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = list(datadir.glob('201*_queryable_cleaned*.h5'))
if len(h5files) == 0:
LOGGER.error("No files found. Searching in %s", str(datadir))
raise NoFilesFoundError(f"No files found. Searching in {str(datadir)}")
return get_latest_file(h5files)
def get_latest_season23_dbase(datadir=None):
if datadir is None:
datadir = data_root
h5files = list(datadir.glob('201*_queryable_cleaned_seasons2and3.h5'))
return get_latest_file(h5files)
def get_test_database():
fname = pr.resource_filename('planet4', 'data/test_db.csv')
return pd.read_csv(fname)
def get_latest_tutorial_data(datadir=None):
if datadir is None:
datadir = data_root
    tut_files = datadir.glob('*_tutorials.h5')
    tut_files = [i for i in tut_files if i.name[:4].isdigit()]
if not tut_files:
raise NoFilesFoundError
return pd.read_hdf(str(get_latest_file(tut_files)), 'df')
def common_gold_ids():
# read the common gold_ids to check
with open('../data/gold_standard_commons.txt') as f:
gold_ids = f.read()
gold_ids = gold_ids.split('\n')
del gold_ids[-1] # last one is empty
return gold_ids
def get_image_names_from_db(dbfname):
"""Return arrary of HiRISE image_names from database file.
Parameters
----------
dbfname : pathlib.Path or str
Path to database file to be used.
Returns
-------
numpy.ndarray
Array of unique image names.
"""
path = Path(dbfname)
if path.suffix in ['.hdf', '.h5']:
with pd.HDFStore(str(dbfname)) as store:
return store.select_column('df', 'image_name').unique()
elif path.suffix == '.csv':
return pd.read_csv(dbfname).image_id.unique()
def get_latest_marked():
return pd.read_hdf(str(get_latest_cleaned_db()), 'df',
where='marking!=None')
def get_image_id_from_fname(fname):
"Return image_id from beginning of Path(fname).name"
fname = Path(fname)
name = fname.name
return name.split('_')[0]
def get_image_ids_in_folder(folder, extension='.csv'):
fnames = Path(folder).glob('*' + extension)
return [get_image_id_from_fname(i) for i in fnames]
class PathManager(object):
"""Manage file paths and folders related to the analysis pipeline.
Level definitions:
* L0 : Raw output of Planet Four
* L1A : Clustering of Blotches and Fans on their own
* L1B : Clustered blotches and fans combined into final fans, final blotches, and fnotches that
need to have a cut applied for the decision between fans or blotches.
* L1C : Derived database where a cut has been applied for fnotches to become either fan or
blotch.
Parameters
----------
id_ : str, optional
The data item id that is used to determine sub-paths. Can be set after
init.
datapath : str or pathlib.Path, optional
the base path from where to manage all derived paths. No default assumed
to prevent errors.
suffix : {'.hdf', '.h5', '.csv'}
The suffix that controls the reader function to be used.
obsid : str, optional
HiRISE obsid (i.e. P4 image_name), added as a folder inside path.
Can be set after init.
extra_path : str, pathlib.Path, optional
Any extra path element that needs to be added to the standard path.
Attributes
----------
cut_dir : pathlib.Path
Defined in `get_cut_folder`.
"""
def __init__(self, id_='', datapath='clustering', suffix='.csv', obsid='', cut=0.5,
extra_path=''):
self.id = id_
self.cut = cut
self._obsid = obsid
self.extra_path = extra_path
if datapath is None:
# take default path if none given
self._datapath = Path(data_root) / 'clustering'
elif Path(datapath).is_absolute():
# if given datapath is absolute, take only that:
self._datapath = Path(datapath)
else:
# if it is relative, add it to data_root
self._datapath = Path(data_root) / datapath
self.suffix = suffix
# point reader to correct function depending on required suffix
if suffix in ['.hdf', '.h5']:
self.reader = pd.read_hdf
elif suffix == '.csv':
self.reader = pd.read_csv
# making sure to warn the user here if the data isn't where it's expected to be
if id_ != '':
if not self.path_so_far.exists():
raise FileNotFoundError(f"{self.path_so_far} does not exist.")
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if value is not None:
self._id = check_and_pad_id(value)
@property
def clustering_logfile(self):
return self.fanfile.parent / 'clustering_settings.yaml'
@property
def obsid(self):
        if self._obsid == '':
            if self.id != '':
LOGGER.debug("Entering obsid search for known image_id.")
db = DBManager()
data = db.get_image_id_markings(self.id)
try:
obsid = data.image_name.iloc[0]
except IndexError:
raise IndexError("obsid access broken. Did you forget to use the `obsid` keyword"
" at initialization?")
LOGGER.debug("obsid found: %s", obsid)
self._obsid = obsid
return self._obsid
@obsid.setter
def obsid(self, value):
self._obsid = value
@property
def obsid_results_savefolder(self):
subfolder = 'p4_catalog' if self.datapath is None else self.datapath
savefolder = analysis_folder() / subfolder
savefolder.mkdir(exist_ok=True, parents=True)
return savefolder
@property
def obsid_final_fans_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_fans.csv"
@property
def obsid_final_blotches_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_blotches.csv"
@property
def datapath(self):
return self._datapath
@property
def path_so_far(self):
p = self.datapath
p /= self.extra_path
p /= self.obsid
return p
@property
def L1A_folder(self):
"Subfolder name for the clustered data before fnotching."
return 'L1A'
@property
def L1B_folder(self):
"Subfolder name for the fnotched data, before cut is applied."
return 'L1B'
@property
def L1C_folder(self):
"subfolder name for the final catalog after applying `cut`."
return 'L1C_cut_{:.1f}'.format(self.cut)
def get_path(self, marking, specific=''):
p = self.path_so_far
# now add the image_id
try:
p /= self.id
except TypeError:
logging.warning("self.id not set. Storing in obsid level.")
id_ = self.id if self.id != '' else self.obsid
# add the specific sub folder
p /= specific
if specific != '':
p /= f"{id_}_{specific}_{marking}{self.suffix}"
else:
# prepend the data level to file name if given.
p /= f"{id_}_{marking}{self.suffix}"
return p
def get_obsid_paths(self, level):
"""get all existing paths for a given data level.
Parameters
----------
level : {'L1A', 'L1B', 'L1C'}
"""
folder = self.path_so_far
# cast to upper case for the lazy... ;)
level = level.upper()
image_id_paths = [item for item in folder.glob('*') if item.is_dir()]
bucket = []
for p in image_id_paths:
try:
bucket.append(next(p.glob(f"{level}*")))
except StopIteration:
continue
return bucket
def get_df(self, fpath):
return self.reader(str(fpath))
@property
def fanfile(self):
return self.get_path('fans', self.L1A_folder)
@property
def fandf(self):
return self.get_df(self.fanfile)
@property
def reduced_fanfile(self):
return self.get_path('fans', self.L1B_folder)
@property
def reduced_fandf(self):
return self.get_df(self.reduced_fanfile)
@property
def final_fanfile(self):
return self.get_path('fans', self.L1C_folder)
@property
def final_fandf(self):
return self.get_df(self.final_fanfile)
@property
def blotchfile(self):
return self.get_path('blotches', self.L1A_folder)
@property
def blotchdf(self):
return self.get_df(self.blotchfile)
@property
def reduced_blotchfile(self):
return self.get_path('blotches', self.L1B_folder)
@property
def reduced_blotchdf(self):
return self.get_df(self.reduced_blotchfile)
@property
def final_blotchfile(self):
return self.get_path('blotches', self.L1C_folder)
@property
def final_blotchdf(self):
return self.get_df(self.final_blotchfile)
@property
def fnotchfile(self):
return self.get_path('fnotches', self.L1B_folder)
@property
def fnotchdf(self):
# the fnotchfile has an index, so i need to read that here:
return pd.read_csv(self.fnotchfile, index_col=0)
class DBManager(object):
"""Access class for database activities.
Provides easy access to often used data items.
Parameters
----------
dbname : str, optional
Path to database file to be used. Default: use get_latest_cleaned_db() to
find it.
Attributes
----------
image_names
image_ids
n_image_ids
n_image_names
obsids : Alias to image_ids
season2and3_image_names
"""
def __init__(self, dbname=None):
"""Initialize DBManager class.
Parameters
----------
dbname : <str>
Filename of database file to use. Default: Latest produced full
database.
"""
if dbname is None:
self.dbname = str(get_latest_cleaned_db())
else:
self.dbname = str(dbname)
def __repr__(self):
s = "Database root: {}\n".format(Path(self.dbname).parent)
s += "Database name: {}\n".format(Path(self.dbname).name)
return s
@property
def orig_csv(self):
p = Path(self.dbname)
return p.parent / (p.name[:38] + '.csv')
def set_latest_with_dupes_db(self, datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = datadir.glob('201*_queryable.h5')
dbname = get_latest_file(h5files)
print("Setting {} as dbname.".format(dbname.name))
self.dbname = str(dbname)
@property
def image_names(self):
"""Return list of unique obsids used in database.
See also
--------
get_image_names_from_db
"""
return get_image_names_from_db(self.dbname)
@property
def image_ids(self):
"Return list of unique image_ids in database."
with pd.HDFStore(self.dbname) as store:
return store.select_column('df', 'image_id').unique()
@property
def n_image_ids(self):
return len(self.image_ids)
@property
def n_image_names(self):
return len(self.image_names)
@property
def obsids(self):
"Alias to self.image_names."
return self.image_names
def get_all(self, datadir=None):
return pd.read_hdf(str(self.dbname), 'df')
def get_obsid_markings(self, obsid):
"Return marking data for given HiRISE obsid."
return pd.read_hdf(self.dbname, 'df', where='image_name=' + obsid)
def get_image_name_markings(self, image_name):
"Alias for get_obsid_markings."
return self.get_obsid_markings(image_name)
def get_image_id_markings(self, image_id):
"Return marking data for one Planet4 image_id"
image_id = check_and_pad_id(image_id)
return pd.read_hdf(self.dbname, 'df', where='image_id=' + image_id)
def get_data_for_obsids(self, obsids):
bucket = []
for obsid in obsids:
bucket.append(self.get_obsid_markings(obsid))
return pd.concat(bucket, ignore_index=True)
def get_classification_id_data(self, class_id):
"Return data for one classification_id"
return pd.read_hdf(self.dbname, 'df',
where="classification_id=='{}'".format(class_id))
@property
def season2and3_image_names(self):
"numpy.array : List of image_names for season 2 and 3."
image_names = self.image_names
metadf = pd.DataFrame(pd.Series(image_names).astype(
'str'), columns=['image_name'])
stats.define_season_column(metadf)
return metadf[(metadf.season > 1) & (metadf.season < 4)].image_name.unique()
def get_general_filter(self, f):
return | pd.read_hdf(self.dbname, 'df', where=f) | pandas.read_hdf |
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import os.path
from tempfile import mkdtemp
from random import seed as py_seed
import numpy as np
import pandas as pd
from scipy.special import expit as logistic
import torch
import torchvision.utils as vutils
import benchmark_tools.benchmark_tools as bt
import benchmark_tools.classification as btc
import benchmark_tools.sciprint as sp
from benchmark_tools.constants import METHOD, METRIC
from contrib.dcgan.dcgan import gan_trainer, BASE_D
from contrib.dcgan.dcgan_loader import get_data_loader, get_opts
from contrib.inception_score.inception_score import \
inception_score_precomp, inception_score_fast
import classification as cl
import mh
SEED_MAX = 2**32 - 1 # np.random.randint can't accept seeds above this
LABEL = 'label'
NA_LEVEL = '-'
DBG = False # Use bogus incep scores for faster debugging
SKIP_INIT_EVAL = True
SAVE_IMAGES = True
INCEP_SCORE = True
INCEP = 'incep'
def const_dict(val):
D = defaultdict(lambda: val)
return D
def base(score, score_max=None):
'''This is a normal GAN. It always just selects the first generated image
in a series.
'''
idx = 0
return idx, 1.0
PICK_DICT = {'MH': mh.mh_sample, 'base': base, 'reject': mh.rejection_sample}
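# Every picker in PICK_DICT shares the call signature of `base` above, e.g.
# (illustrative, with `s_` a vector of calibrated scores):
# idx, alpha = PICK_DICT['MH'](s_, score_max=s_max)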
# ============================================================================
# These functions are here just to illustrate the kind of interface the trainer
# iterator needs to provide.
def gen_dummy(batch_size):
image_size = 32
nc = 3
X = np.random.randn(batch_size, nc, image_size, image_size)
return X
def gen_and_disc_dummy(batch_size):
disc_names = (BASE_D,)
X = gen_dummy(batch_size)
scores = {k: np.random.rand(X.shape[0]) for k in disc_names}
return X, scores
def trainer_dummy():
disc_names = (BASE_D,)
while True:
scores_real = {k: np.random.rand(320) for k in disc_names}
yield gen_dummy, gen_and_disc_dummy, scores_real
# ============================================================================
# Validation routines:
def validate_X(X):
assert isinstance(X, np.ndarray)
assert X.dtype.kind == 'f'
batch_size, nc, image_size, _ = X.shape
assert X.shape == (batch_size, nc, image_size, image_size)
assert np.all(np.isfinite(X))
return X
def validate_scores(scores):
assert isinstance(scores, dict)
for sv in scores.values():
assert isinstance(sv, np.ndarray)
assert sv.dtype.kind == 'f'
assert sv.ndim == 1
assert np.all(0 <= sv) and np.all(sv <= 1)
scores = pd.DataFrame(scores)
return scores
def validate(R):
'''
X : ndarray, shape (batch_size, nc, image_size, image_size)
scores : dict of str -> ndarray of shape (batch_size,)
'''
X, scores = R
X = validate_X(X)
scores = validate_scores(scores)
assert len(X) == len(scores)
return X, scores
# ============================================================================
def batched_gen_and_disc(gen_and_disc, n_batches, batch_size):
'''
Get a large batch of images. Pytorch might run out of memory if we set
the batch size to n_images=n_batches*batch_size directly.
g_d_f : callable returning (X, scores) compliant with `validate`
n_images : int
assumed to be multiple of batch size
'''
X, scores = zip(*[validate(gen_and_disc(batch_size))
                      for _ in range(n_batches)])
X = np.concatenate(X, axis=0)
scores = pd.concat(scores, axis=0, ignore_index=True)
return X, scores
def enhance_samples(scores_df, scores_max, scores_real_df, clf_df,
pickers=PICK_DICT):
'''
Return selected image (among a batcf on n images) for each picker.
scores_df : DataFrame, shape (n, n_discriminators)
scores_real_df : DataFrame, shape (m, n_discriminators)
clf_df : Series, shape (n_classifiers x n_calibrators,)
pickers : dict of str -> callable
'''
assert len(scores_df.columns.names) == 1
assert list(scores_df.columns) == list(scores_real_df.columns)
init_idx = np.random.choice(len(scores_real_df))
picked = pd.DataFrame(data=0, index=pickers.keys(), columns=clf_df.index,
dtype=int)
cap_out = pd.DataFrame(data=False,
index=pickers.keys(), columns=clf_df.index,
dtype=bool)
alpha = pd.DataFrame(data=np.nan,
index=pickers.keys(), columns=clf_df.index,
dtype=float)
for disc_name in sorted(scores_df.columns):
assert isinstance(disc_name, str)
s0 = scores_real_df[disc_name].values[init_idx]
assert np.ndim(s0) == 0
for calib_name in sorted(clf_df[disc_name].index):
assert isinstance(calib_name, str)
calibrator = clf_df[(disc_name, calib_name)]
s_ = np.concatenate(([s0], scores_df[disc_name].values))
s_ = calibrator.predict(s_)
s_max, = calibrator.predict(np.array([scores_max[disc_name]]))
for picker_name in sorted(pickers.keys()):
assert isinstance(picker_name, str)
idx, aa = pickers[picker_name](s_, score_max=s_max)
if idx == 0:
# Try again but init from first fake
cap_out.loc[picker_name, (disc_name, calib_name)] = True
idx, aa = pickers[picker_name](s_[1:], score_max=s_max)
else:
idx = idx - 1
assert idx >= 0
picked.loc[picker_name, (disc_name, calib_name)] = idx
alpha.loc[picker_name, (disc_name, calib_name)] = aa
return picked, cap_out, alpha
def enhance_samples_series(g_d_f, scores_real_df, clf_df,
pickers=PICK_DICT, n_images=64):
'''
Call enhance_samples multiple times to build up a batch of selected images.
Stores list of used images X separate from the indices of the images
selected by each method. This is more memory efficient if there are
duplicate images selected.
g_d_f : callable returning (X, scores) compliant with `validate`
calibrator : dict of str -> trained sklearn classifier
same keys as scores
n_images : int
'''
batch_size = 64 # Batch size to use when calling the pytorch generator G
chain_batches = 10 # Number of batches to use total for the pickers
max_est_batches = 156 # Num batches for estimating M in DRS pilot samples
assert n_images > 0
_, scores_max = batched_gen_and_disc(g_d_f, max_est_batches, batch_size)
scores_max = scores_max.max(axis=0)
print('max scores')
print(scores_max.to_string())
X = []
picked = [None] * n_images
cap_out = [None] * n_images
alpha = [None] * n_images
    for nn in range(n_images):
X_, scores_fake_df = \
batched_gen_and_disc(g_d_f, chain_batches, batch_size)
picked_, cc, aa = \
enhance_samples(scores_fake_df, scores_max, scores_real_df, clf_df,
pickers=pickers)
picked_ = picked_.unstack() # Convert to series
# Only save the used images for memory, so some index x-from needed
assert np.ndim(picked_.values) == 1
used_idx, idx_new = np.unique(picked_.values, return_inverse=True)
picked_ = pd.Series(data=idx_new, index=picked_.index)
# A bit of index manipulation in our memory saving scheme
picked[nn] = len(X) + picked_
X.extend(list(X_[used_idx])) # Unravel first index to list
cap_out[nn] = cc.unstack()
alpha[nn] = aa.unstack()
X = np.asarray(X)
assert X.ndim == 4
picked = pd.concat(picked, axis=1).T
assert picked.shape == (n_images, len(picked_))
cap_out = pd.concat(cap_out, axis=1).T
assert cap_out.shape == (n_images, len(picked_))
alpha = pd.concat(alpha, axis=1).T
assert alpha.shape == (n_images, len(picked_))
return X, picked, cap_out, alpha
def discriminator_analysis(scores_fake_df, scores_real_df, ref_method,
dump_fname=None):
'''
scores_fake_df : DataFrame, shape (n, n_discriminators)
scores_real_df : DataFrame, shape (n, n_discriminators)
ref_method : (str, str)
perf_report : str
calib_report : str
clf_df : DataFrame, shape (n_calibrators, n_discriminators)
'''
# Build combined data set dataframe and train calibrators
pred_df, y_true = cl.combine_class_df(neg_class_df=scores_fake_df,
pos_class_df=scores_real_df)
pred_df, y_true, clf_df = cl.calibrate_pred_df(pred_df, y_true)
# Make methods flat to be compatible with benchmark tools
pred_df.columns = cl.flat_cols(pred_df.columns)
ref_method = cl.flat(ref_method) # Make it flat as well
# Do calibration analysis
Z = cl.calibration_diagnostic(pred_df, y_true)
calib_report = Z.to_string()
# Dump prediction to csv in case we want it for later analysis
if dump_fname is not None:
pred_df_dump = | pd.DataFrame(pred_df, copy=True) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.graphics.tsaplots as sgt
from statsmodels.tsa.arima_model import ARMA
from scipy.stats.distributions import chi2
import statsmodels.tsa.stattools as sts
# ------------------------
# load data
# ----------
raw_csv_data = | pd.read_csv("../data/Index2018.csv") | pandas.read_csv |
import os, sys, re, json, random, copy, argparse, pickle, importlib
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import logomaker as lm
from util import *
import warnings
warnings.filterwarnings('ignore')
class Predictor():
def __init__(self, mhc_encode_dict, model_file, model_state_files, encoding_method):
# MHC binding domain encoding
self.mhc_encode_dict = mhc_encode_dict
# device: gpu or cpu
if torch.cuda.is_available():
self.device = torch.device('cuda')
self.batch_size = 4096
else:
self.device = torch.device('cpu')
self.batch_size = 64
# model
if encoding_method == 'onehot':
dim = 21
elif encoding_method == 'blosum':
dim = 24
else:
print("Wrong encoding method")
raise ValueError
model_file = '.'.join(model_file.split('.')[0].split('/'))
module = importlib.import_module(model_file)
self.model = module.CombineModel(module.MHCModel(dim), module.EpitopeModel(dim))
# model states
self.models = OrderedDict()
for i in range(len(model_state_files)):
basename = re.split(r'[\/\.]', model_state_files[i])[-2]
model_state_dict = torch.load(model_state_files[i], map_location=self.device)
self.models[basename] = copy.deepcopy(self.model)
self.models[basename].load_state_dict(model_state_dict['model_state_dict'])
self.models[basename].to(self.device)
def __call__(self, df, dataset, allele=None):
result_df = pd.DataFrame(index=df.index, columns=list(self.models.keys()))
result_df['sequence'] = df['sequence']
# general mode
if allele:
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, shuffle=False)
preds = self._predict(allele, dataloader)
result_df.loc[:, list(self.models.keys())] = preds
# specific mode
else:
result_df['mhc'] = df['mhc']
for allele, sub_df in tqdm(df.groupby('mhc'), desc='alleles', leave=False, position=0):
idx = sub_df.index
sub_dataset = torch.utils.data.Subset(dataset, idx)
sub_dataloader = torch.utils.data.DataLoader(sub_dataset, batch_size=self.batch_size, shuffle=False)
preds = self._predict(allele, sub_dataloader)
result_df.loc[idx, list(self.models.keys())] = preds
return result_df
def _predict(self, allele, dataloader):
mhc_encode = self.mhc_encode_dict[allele]
df = pd.DataFrame()
for key, model in tqdm(self.models.items(), desc='models', leave=False, position=1):
for j, (x,y) in enumerate(tqdm(dataloader, desc='batches', leave=False, position=2)):
with torch.no_grad():
model.eval()
num = x.shape[0]
epitope_encode = x.to(self.device).float()
mhc_encode_tile = torch.FloatTensor(np.tile(mhc_encode, (num, 1, 1))).to(self.device)
pred = model(mhc_encode_tile, epitope_encode).to('cpu')
pred = pred.view(-1,).numpy()
if j==0:
preds = pred
else:
preds = np.append(preds, pred, axis=0)
df[key] = preds
return df.values
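# Note (added, illustrative): each model-state column of the result frame produced by
# __call__ holds one model's predictions. A simple ensemble score could be formed
# afterwards, e.g. result_df[list(self.models.keys())].mean(axis=1), though how the
# original pipeline combines the model states is not shown in this excerpt.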
class Interpretation():
def __init__(self, interpretation_db, output_dir):
self.aa_str = 'ACDEFGHIKLMNPQRSTVWY'
self.sub_motif_len = 4
self.dpi = 200
self.fontsize = 8
self.interp_dict = interpretation_db
self.positions = self.interp_dict['important_positions']
self.mhc_dict = self.interp_dict['seq']
self.motif_dict = self.interp_dict['motif']
self.output_dir = output_dir
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
def __call__(self, allele):
hla = allele.split('*')[0]
motif_df = pd.DataFrame(self.motif_dict[allele], columns=list(self.aa_str))
allele_df = self._get_allele_seqlogo(allele)
fig, ax = plt.subplots(2, 4, figsize=(10, 4), dpi=self.dpi, gridspec_kw={'width_ratios': [1, 4, 1, 4]})
ax_y = 0
for side in ['N', 'C']:
# cluster
cluster = self.interp_dict['cluster']['%s_%s'%(hla, side)][allele]
# sub motif
if side == 'N':
sub_motif_df = motif_df.iloc[:4]
else:
sub_motif_df = motif_df.iloc[-4:].reset_index(drop=True)
# sub-motif plot
self._motif_plot(sub_motif_df, side, ax[1][ax_y])
# check cluster
if cluster not in self.interp_dict['hyper_motif']['%s_%s'%(hla, side)].keys():
_ = ax[0][ax_y+1].set_title('%s-terminus: not well classified'%side, loc='left')
_ = ax[1][ax_y+1].set_title('%s-terminus: allele information'%side, loc='left')
print('%s-terminal of %s is not well classified'%(side, allele))
continue
# hyper-motif plot
hyper_motif = self.interp_dict['hyper_motif']['%s_%s'%(hla, side)][cluster]
hyper_motif = pd.DataFrame(hyper_motif, columns=list(self.aa_str))
self._motif_plot(hyper_motif, side, ax[0][ax_y])
# allele signature plot
allele_signature = self.interp_dict['allele_signature']['%s_%s'%(hla, side)][cluster]
allele_signature = pd.DataFrame(allele_signature, columns=list(self.aa_str))
self._mhcseq_plot(allele_signature, ax[0][ax_y+1], title='%s-terminus: cluster information'%side)
# highlighted allele signature plot
allele_df[allele_df > 0] = 1
allele_signature[allele_signature < 0] = 0
self._mhcseq_plot(allele_df * allele_signature, ax[1][ax_y+1], title='%s-terminus: allele information'%side)
ax_y += 2
fig.tight_layout()
fig.savefig('%s/%s%s%s.png'%(self.output_dir, hla, allele[2:4], allele[5:]))
def _get_allele_seqlogo(self, allele):
seq = self.mhc_dict[allele]
seq = ''.join([seq[i] for i in self.positions])
seqlogo_df = lm.alignment_to_matrix(sequences=[seq], to_type='counts')
df = pd.DataFrame(columns=list(self.aa_str))
return pd.concat([df, seqlogo_df], axis=0)[list(self.aa_str)].fillna(0)
def _motif_plot(self, seqlogo_df, side, ax, ylim=4, title=None, turn_off_label=False):
if side == 'N':
xticklabels = list(range(1, self.sub_motif_len+1))
else:
xticklabels = list(range(-self.sub_motif_len, 0))
logo = lm.Logo(seqlogo_df, color_scheme='skylign_protein', ax=ax)
_ = ax.set_xticks(list(range(len(xticklabels))))
_ = ax.set_xticklabels(xticklabels)
_ = ax.set_ylim(0,ylim)
_ = ax.set_title(title)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize-2)
_ = ax.set_yticks([])
_ = ax.set_yticklabels([])
if turn_off_label:
_ = ax.set_xticks([])
_ = ax.set_xticklabels([])
_ = ax.set_title(None)
def _mhcseq_plot(self, seqlogo_df, ax, ylim=1, title=None, turn_off_label=False):
logo = lm.Logo(seqlogo_df, color_scheme='skylign_protein', ax=ax)
_ = ax.set_ylim(0, ylim)
_ = ax.set_xticks(range(len(self.positions)))
_ = ax.set_xticklabels([i+1 for i in self.positions], rotation=90)
_ = ax.set_title(title, loc='left')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize-2)
_ = ax.set_yticks([])
_ = ax.set_yticklabels([])
if turn_off_label:
_ = ax.set_xticks([])
_ = ax.set_xticklabels([])
_ = ax.set_title(None)
def ArgumentParser():
description = '''
MHCfovea, an MHCI-peptide binding predictor. In this prediction process, GPU is recommended.
Having two modes:
1. specific mode: each peptide has its corresponding MHC-I allele in the input file; column "mhc" or "allele" is required
2. general mode: all peptides are predicted with all alleles in the "alleles" argument
Input file:
only .csv file is acceptable
column "sequence" or "peptide" is required as peptide sequences
column "mhc" or "allele" is optional as MHC-I alleles
Output directory contains:
1. prediction.csv: with new column "score" for specific mode or [allele] for general mode
2. interpretation: a directory contains interpretation figures of each allele
3. metrics.json: all and allele-specific metrics (AUC, AUC0.1, AP, PPV); column "bind" as benchmark is required
'''
parser = argparse.ArgumentParser(prog='predictor', description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', help='The input file')
parser.add_argument('output_dir', help='The output directory')
parser.add_argument('--alleles', required=False, default=None, help='alleles for general mode')
parser.add_argument('--get_metrics', required=False, default=False, action='store_true', help='calculate the metrics between prediction and benchmark')
return parser
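# Illustrative invocations (added; file names and allele values are hypothetical):
# specific mode - the input CSV already carries an "mhc"/"allele" column:
#     python predictor.py peptides.csv output_dir/
# general mode - score every peptide against the alleles passed on the command line,
# and also compute metrics against a "bind" column:
#     python predictor.py peptides.csv output_dir/ --alleles <alleles> --get_metrics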
def main(args=None):
"""""""""""""""""""""""""""""""""""""""""
# Arguments
"""""""""""""""""""""""""""""""""""""""""
args = ArgumentParser().parse_args(args)
current_dir = os.path.abspath(os.getcwd())
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# input
if args.input.startswith('/'):
peptide_dataframe = args.input
else:
peptide_dataframe = '%s/%s'%(current_dir, args.input)
# output
if args.output_dir.startswith('/'):
output_dir = args.output_dir
else:
output_dir = '%s/%s'%(current_dir, args.output_dir)
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# data
rank_file = '../data/score_rank.csv'
rank_df = pd.read_csv(rank_file, index_col=0)
db_file = '../data/interpretation.pkl'
db = pickle.load(open(db_file, 'rb'))
mhc_dict = db['seq']
alleles = args.alleles
encoding_method = 'onehot'
# model
model_file = 'model.py'
model_state_dir = 'model_state'
# others
get_metrics = args.get_metrics
"""""""""""""""""""""""""""""""""""""""""
# Loading Data & Model
"""""""""""""""""""""""""""""""""""""""""
print("Loading data and model...")
# model state files
model_state_files = list()
for file in os.listdir(model_state_dir):
model_state_files.append('%s/%s'%(model_state_dir, file))
model_state_files.sort()
# peptide dataframe
df = | pd.read_csv(peptide_dataframe) | pandas.read_csv |
from snorkel.lf_helpers import (
get_left_tokens,
get_right_tokens,
get_between_tokens,
get_tagged_text,
get_text_between,
is_inverted,
rule_regex_search_tagged_text,
rule_regex_search_btw_AB,
rule_regex_search_btw_BA,
rule_regex_search_before_A,
rule_regex_search_before_B,
)
import numpy as np
import random
import re
import pathlib
import pandas as pd
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
random.seed(100)
stop_word_list = stopwords.words('english')
"""
Debugging to understand how LFs work
"""
def LF_DEBUG(c):
"""
This label function is for debugging purposes. Feel free to ignore.
keyword arguments:
c - The candidate object to be labeled
"""
print(c)
print()
print("Left Tokens")
print(list(get_left_tokens(c[0], window=5)))
print()
print("Right Tokens")
print(list(get_right_tokens(c[0])))
print()
print("Between Tokens")
print(list(get_between_tokens(c)))
print()
print("Tagged Text")
print(get_tagged_text(c))
print(re.search(r'{{B}} .* is a .* {{A}}', get_tagged_text(c)))
print()
print("Get between Text")
print(get_text_between(c))
print(len(get_text_between(c)))
print()
print("Parent Text")
print(c.get_parent())
print()
return 0
# Helper function for label functions
def ltp(tokens):
return '(' + '|'.join(tokens) + ')'
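# Example (added for clarity; the pattern below is hypothetical and not one of the
# original label functions): ltp() builds a regex alternation group, e.g.
#     ltp(["prevent", "inhibit"]) == "(prevent|inhibit)"
# which can be embedded in tagged-text searches such as
#     rule_regex_search_btw_AB(c, r'{{A}}.*' + ltp(["prevent", "inhibit"]) + r'.*{{B}}', 1)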
"""
DISTANT SUPERVISION
"""
path = pathlib.Path(__file__).joinpath('../../../../disease_gene_pairs_association.csv.xz').resolve()
pair_df = pd.read_csv(path, dtype={"sources": str})
knowledge_base = set()
for row in pair_df.itertuples():
if not row.sources or | pd.isnull(row.sources) | pandas.isnull |
"""
================================
The Population Management System
================================
This module provides tools for managing the :term:`state table <State Table>`
in a :mod:`vivarium` simulation, which is the record of all simulants in a
simulation and their state. It's main tasks are managing the creation of new
simulants and providing the ability for components to view and update simulant
state safely during runtime.
"""
from types import MethodType
from typing import List, Callable, Union, Dict, Any, NamedTuple, Tuple
import pandas as pd
from vivarium.exceptions import VivariumError
class PopulationError(VivariumError):
"""Error raised when the population is invalidly queried or updated."""
pass
class PopulationView:
"""A read/write manager for the simulation state table.
It can be used to both read and update the state of the population. A
PopulationView can only read and write columns for which it is configured.
Attempts to update non-existent columns are ignored except during
simulant creation when new columns are allowed to be created.
Parameters
----------
manager
The population manager for the simulation.
columns
The set of columns this view should have access to. If explicitly
specified as ``None``, this view will have access to the entire
state table.
query
A :mod:`pandas`-style filter that will be applied any time this
view is read from.
Notes
-----
By default, this view will filter out ``untracked`` simulants unless
the ``tracked`` column is specified in the initialization arguments.
"""
def __init__(self,
manager: 'PopulationManager',
view_id: int,
columns: Union[List[str], Tuple[str], None] = (),
query: str = None):
self._manager = manager
self._id = view_id
self._columns = list(columns)
self._query = query
@property
def name(self):
return f'population_view_{self._id}'
@property
def columns(self) -> List[str]:
"""The columns that the view can read and update.
If the view was created with ``None`` as the columns argument, then
the view will have access to the full table by default. That case
should be only be used in situations where the full state table is
actually needed, like for some metrics collection applications.
"""
if not self._columns:
return list(self._manager.get_population(True).columns)
return list(self._columns)
@property
def query(self) -> str:
"""A :mod:`pandas` style query to filter the rows of this view.
This query will be applied any time the view is read. This query may
reference columns not in the view's columns.
"""
return self._query
def subview(self, columns: Union[List[str], Tuple[str]]) -> 'PopulationView':
"""Retrieves a new view with a subset of this view's columns.
Parameters
----------
columns
The set of columns to provide access to in the subview. Must be
a proper subset of this view's columns.
Returns
-------
PopulationView
A new view with access to the requested columns.
Raises
------
PopulationError
If the requested columns are not a proper subset of this view's
columns.
Notes
-----
Subviews are useful during population initialization. The original
view may contain both columns that a component needs to create and
update as well as columns that the component needs to read. By
requesting a subview, a component can read the sections it needs
without running the risk of trying to access uncreated columns
because the component itself has not created them.
"""
if set(columns) > set(self.columns):
raise PopulationError(f"Invalid subview requested. Requested columns must be a subset of this "
f"view's columns. Requested columns: {columns}, Available columns: {self.columns}")
# Skip constraints for requesting subviews.
return self._manager._get_view(columns, self.query)
def get(self, index: pd.Index, query: str = '') -> pd.DataFrame:
"""Select the rows represented by the given index from this view.
For the rows in ``index`` get the columns from the simulation's
state table to which this view has access. The resulting rows may be
further filtered by the view's query and only return a subset
of the population represented by the index.
Parameters
----------
index
Index of the population to get.
query
Additional conditions used to filter the index. These conditions
will be unioned with the default query of this view. The query
provided may use columns that this view does not have access to.
Returns
-------
pandas.DataFrame
A table with the subset of the population requested.
Raises
------
PopulationError
If this view has access to columns that have not yet been created
and this method is called. If you see this error, you should
request a subview with the columns you need read access to.
See Also
--------
:meth:`subview <PopulationView.subview>`
"""
pop = self._manager.get_population(True).loc[index]
if not index.empty:
if self._query:
pop = pop.query(self._query)
if query:
pop = pop.query(query)
if not self._columns:
return pop
else:
columns = self._columns
non_existent_columns = set(columns) - set(pop.columns)
if non_existent_columns:
raise PopulationError(f'Requested column(s) {non_existent_columns} not in population table.')
else:
return pop.loc[:, columns]
def update(self, population_update: Union[pd.DataFrame, pd.Series]):
"""Updates the state table with the provided data.
Parameters
----------
population_update
The data which should be copied into the simulation's state. If
the update is a :class:`pandas.DataFrame`, it can contain a subset
of the view's columns but no extra columns. If ``pop`` is a
:class:`pandas.Series` it must have a name that matches one of
this view's columns unless the view only has one column in which
case the Series will be assumed to refer to that regardless of its
name.
Raises
------
PopulationError
If the provided data name or columns does not match columns that
this view manages or if the view is being updated with a data
type inconsistent with the original population data.
"""
if population_update.empty:
return
# TODO: Cast series to data frame and clean this up.
if isinstance(population_update, pd.Series):
if population_update.name in self._columns:
affected_columns = [population_update.name]
elif len(self._columns) == 1:
affected_columns = self._columns
else:
raise PopulationError('Cannot update with a pandas series unless the series name is a column '
'name in the view or there is only a single column in the view.')
else:
if not set(population_update.columns).issubset(self._columns):
raise PopulationError(f'Cannot update with a DataFrame that contains columns the view does not. '
f'Dataframe contains the following extra columns: '
f'{set(population_update.columns).difference(self._columns)}.')
affected_columns = set(population_update.columns)
affected_columns = set(affected_columns).intersection(self._columns)
state_table = self._manager.get_population(True)
if not self._manager.growing:
affected_columns = set(affected_columns).intersection(state_table.columns)
for affected_column in affected_columns:
if affected_column in state_table:
new_state_table_values = state_table[affected_column].values
if isinstance(population_update, pd.Series):
update_values = population_update.values
else:
update_values = population_update[affected_column].values
new_state_table_values[population_update.index] = update_values
if new_state_table_values.dtype != update_values.dtype:
# This happens when the population is being grown because extending
# the index forces columns that don't have a natural null type
# to become 'object'
if not self._manager.growing:
raise PopulationError('Component corrupting population table. '
f'Column name: {affected_column} '
f'Old column type: {new_state_table_values.dtype} '
f'New column type: {update_values.dtype}')
new_state_table_values = new_state_table_values.astype(update_values.dtype)
else:
if isinstance(population_update, pd.Series):
new_state_table_values = population_update.values
else:
new_state_table_values = population_update[affected_column].values
self._manager._population[affected_column] = new_state_table_values
def __repr__(self):
return f"PopulationView(_id={self._id}, _columns={self.columns}, _query={self._query})"
class SimulantData(NamedTuple):
"""Data to help components initialize simulants.
Any time simulants are added to the simulation, each initializer is called
with this structure containing information relevant to their
initialization.
"""
#: The index representing the new simulants being added to the simulation.
index: pd.Index
#: A dictionary of extra data passed in by the component creating the
#: population.
user_data: Dict[str, Any]
#: The time when the simulants enter the simulation.
creation_time: pd.Timestamp
#: The span of time over which the simulants are created. Useful for,
#: e.g., distributing ages over the window.
creation_window: pd.Timedelta
class InitializerComponentSet:
"""Set of unique components with population initializers."""
def __init__(self):
self._components = {}
self._columns_produced = {}
def add(self, initializer: Callable, columns_produced: List[str]):
"""Adds an initializer and columns to the set, enforcing uniqueness.
Parameters
----------
initializer
The population initializer to add to the set.
columns_produced
The columns the initializer produces.
Raises
------
TypeError
If the initializer is not an object method.
AttributeError
If the object bound to the method does not have a name attribute.
PopulationError
If the component bound to the method already has an initializer
registered or if the columns produced are duplicates of columns
another initializer produces.
"""
if not isinstance(initializer, MethodType):
raise TypeError('Population initializers must be methods of named simulation components. '
f'You provided {initializer} which is of type {type(initializer)}.')
component = initializer.__self__
if not hasattr(component, "name"):
raise AttributeError('Population initializers must be methods of named simulation components. '
f'You provided {initializer} which is bound to {component} that has no '
f'name attribute.')
if component.name in self._components:
raise PopulationError(f'Component {component.name} has multiple population initializers. '
'This is not allowed.')
for column in columns_produced:
if column in self._columns_produced:
raise PopulationError(f'Component {component.name} and component {self._columns_produced[column]} '
f'have both registered initializers for column {column}.')
self._columns_produced[column] = component.name
self._components[component.name] = columns_produced
def __repr__(self):
return repr(self._components)
def __str__(self):
return str(self._components)
class PopulationManager:
"""Manages the state of the simulated population."""
# TODO: Move the configuration for initial population creation to
# user components.
configuration_defaults = {
'population': {'population_size': 100}
}
def __init__(self):
self._population = pd.DataFrame()
self._initializer_components = InitializerComponentSet()
self.growing = False
self._last_id = -1
############################
# Normal Component Methods #
############################
@property
def name(self):
"""The name of this component."""
return "population_manager"
def setup(self, builder):
"""Registers the population manager with other vivarium systems."""
self.clock = builder.time.clock()
self.step_size = builder.time.step_size()
self.resources = builder.resources
self._add_constraint = builder.lifecycle.add_constraint
builder.lifecycle.add_constraint(self.get_view, allow_during=['setup', 'post_setup', 'population_creation',
'simulation_end', 'report'])
builder.lifecycle.add_constraint(self.get_simulant_creator, allow_during=['setup'])
builder.lifecycle.add_constraint(self.register_simulant_initializer, allow_during=['setup'])
self.register_simulant_initializer(self.on_initialize_simulants, creates_columns=['tracked'])
self._view = self.get_view(['tracked'])
builder.value.register_value_modifier('metrics', modifier=self.metrics)
def on_initialize_simulants(self, pop_data: SimulantData):
"""Adds a ``tracked`` column to the state table for new simulants."""
status = | pd.Series(True, index=pop_data.index) | pandas.Series |
#!/usr/bin/env python3
# coding: utf-8
"""
@author: <NAME> <EMAIL>
@last modified by: <NAME>
@file:cell_type_anno.py
@time:2021/03/09
change log:
2021/05/20 rst supplement. by: qindanhua.
2021/07/08 adjust for restructure base class . by: qindanhua.
"""
import pandas as pd
import numpy as np
import os
from multiprocessing import Pool
import traceback
from ..log_manager import logger
from ..utils.correlation import spearmanr_corr, pearson_corr
from ..preprocess.normalize import normalize_total
from ..config import stereo_conf
from ..utils import remove_file
from ..core.tool_base import ToolBase
class CellTypeAnno(ToolBase):
"""
predict the cell type of bin-cells
:param data: StereoExpData object
:param ref_dir: reference database directory
:param cores: number of cores to use, to speed up the run
:param keep_zeros: if True, keep genes that are in the reference but not in the input expression data
:param use_rf: whether to run with randomly chosen genes
:param sample_rate: ratio of the data to sample
:param n_estimators: number of prediction runs
:param strategy:
:param method: calculate correlation's method
:param split_num:
Example
-------
>>> from stereo.io.reader import read_stereo
>>>sed = read_stereo('test_gem', 'txt', 'bins')
>>>cta = CellTypeAnno(sed, ref_dir='/path/to/reference_exp_data_dir/')
>>>cta.fit()
cell cell type ... type_cnt_sum type_rate
0 0_0 hereditary spherocytosis cell line ... 20 1.0
1 0_1 hereditary spherocytosis cell line ... 20 1.0
2 0_10 hereditary spherocytosis cell line ... 20 1.0
"""
def __init__(
self,
data,
method='spearmanr',
ref_dir: str = None,
cores: int = 1,
keep_zeros: bool = True,
use_rf: bool = True,
sample_rate: float = 0.8,
n_estimators: int = 20,
strategy='1',
split_num: int = 1,
):
super(CellTypeAnno, self).__init__(data=data, method=method)
self.ref_dir = ref_dir
self.n_jobs = cores
self.keep_zeros = keep_zeros
self.use_rf = use_rf
self.sample_rate = sample_rate
self.n_estimators = n_estimators
self.strategy = strategy
self.split_num = split_num
self.output = stereo_conf.out_dir
@property
def ref_dir(self):
return self._ref_dir
@ref_dir.setter
def ref_dir(self, ref_dir):
"""
set the reference directory, which must contain the two files ref_sample_epx.csv and cell_map.csv
"""
git_ref = 'https://github.com/BGIResearch/stereopy/raw/data/FANTOM5/ref_sample_epx.csv'
if ref_dir is None:
logger.info(f'reference file not found, download from {git_ref}')
ref_dir = os.path.join(stereo_conf.data_dir, 'ref_db', 'FANTOM5')
self.download_ref(ref_dir)
if not (os.path.exists(os.path.join(ref_dir, 'ref_sample_epx.csv')) and
os.path.exists(os.path.join(ref_dir, 'cell_map.csv'))):
raise ValueError(
'reference file not found, ref_dir must contain the two files ref_sample_epx.csv and cell_map.csv'
)
self._ref_dir = ref_dir
@ToolBase.method.setter
def method(self, method):
m_range = ['spearmanr', 'pearson']
self._method_check(method, m_range)
def split_dataframe(self, df):
"""
split input data into N (split_num) parts
:param df: input expression data frame
:return: N part of data frame
"""
datas = []
logger.info(f'input data: {df.shape[0]} genes, {df.shape[1]} cells.')
if self.split_num > 1:
logger.info(f'split the exp matrix to {self.split_num} matrixs')
step_size = int(df.shape[1]/self.split_num) + 1
for i in range(self.split_num):
start = i * step_size
end = start + step_size if start + step_size < df.shape[1] else df.shape[1]
datas.append(df.iloc[:, start: end])
else:
datas.append(df)
return datas
@staticmethod
def concat_top_corr_files(files, output_dir, prefix=None):
"""
concatenate the correlation files produced by the n prediction runs
:param files: all prediction results
:param output_dir: output directory
:param prefix: prefix of output files
:return: correlation dataframe
"""
df = | pd.read_csv(files[0]) | pandas.read_csv |
import os
# Enforce CPU Usage
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Uncommenting enforces CPU usage # Commenting enforces GPU usage
# Seed the Random-Number-Generator in a bid to get 'Reproducible Results'
import tensorflow as tf
from random import seed, sample
from numpy.random import seed
seed(1)
tf.compat.v1.set_random_seed(3)
# load required modules
import pandas as pd
import numpy as np
import math, time
from datetime import datetime, timedelta
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import QuantileTransformer, MinMaxScaler, Normalizer
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, r2_score
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor, BayesianRidge, ARDRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
# Import classes from my custom package
from custom_classes.Starter_Module_01 import Starter
# Global settings for PANDAS frame display
| pd.set_option('html.table_schema', True) | pandas.set_option |
import re
import os
import csv
import pandas as pd
data = []
for fn in os.listdir("./data"):
yr = int(re.sub("[^0-9]","", fn))
print("scanning year %d\n" % (yr))
with open("./data/"+fn, 'rt') as f:
reader = csv.reader(f)
for row in reader:
data.append(row+[yr])
print("done scanning files")
data = | pd.DataFrame(data, columns=['name','gender','n','year']) | pandas.DataFrame |
import pandas as pd
import tkinter as tk
from tkinter import filedialog, messagebox
# Load File
class MissingDataImputer(object):
# Allow User to Select File via tkinter, returns file_path
def getFilePath(self):
""" Get Name and Location of User's CSV file
Args:
None
Returns:
File Path of Target CSV.
"""
root = tk.Tk()
messagebox.showinfo("Missing Data Imputer", "Click OK to Choose your File.")
root.withdraw()
file_path = filedialog.askopenfilename()
return file_path
def get_file(self, filename):
""" Extract csv file contents, sep on semi-colon
Args:
filename: Path to target CSV
Returns:
raw data of csv file
"""
raw = | pd.read_csv(filename) | pandas.read_csv |
"""
Authors: <NAME>, <NAME>
Feature selection module using chi2, ANOVA, and mutual information.
The main objective is to take X and y as input and output a dataframe with features and their scores.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import f_classif,chi2, mutual_info_classif, SelectKBest
class Filter_Algorithms(object):
def __init__(self, X, y, test_size, seed=0):
"""
Parameters
----------
input:
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores
y: array-like {n_samples}
Training labels
output:
R: Ranked features according to the particular algorithm
-------
"""
self.X = X # Feature values
self.y = y # Target values
self.seed = seed # Fixed seed
self.test_size = test_size # Split for train and test
def fit_Chi2(self):
scores_Chi2 = []
X_train, X_val, y_train, y_val = train_test_split(self.X, self.y, stratify=self.y, test_size=self.test_size, random_state=self.seed)
X_train = | pd.DataFrame(data=X_train, columns=self.X.columns) | pandas.DataFrame |
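# Illustrative sketch (added) of the pattern the Filter_Algorithms class above builds
# toward: score every feature with a filter statistic and return a (feature, score)
# table. X is assumed to be a pandas DataFrame with non-negative values (a chi2
# requirement); the function and column names here are hypothetical.
def example_chi2_scores(X, y):
    import pandas as pd
    from sklearn.feature_selection import SelectKBest, chi2
    selector = SelectKBest(score_func=chi2, k='all').fit(X, y)
    scores = pd.DataFrame({'feature': X.columns, 'score': selector.scores_})
    return scores.sort_values('score', ascending=False).reset_index(drop=True)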
# Catboost for Avito Demand Prediction Challenge
# https://www.kaggle.com/c/avito-demand-prediction
# By <NAME>, April 2018
#https://www.kaggle.com/nicapotato/simple-catboost/code
import time
notebookstart = time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
from sklearn.model_selection import KFold
# print("Data:\n", os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import *
# Viz
# import seaborn as sns
# import matplotlib.pyplot as plt
print("\nData Load Stage")
debug=False
if debug:
nrows=10000*1
else:
nrows=1503424
training = pd.read_csv('../input/train.csv',nrows=nrows, index_col="item_id", parse_dates=["activation_date"])
traindex = training.index
len_train = len(training)
testing = pd.read_csv('../input/test.csv',nrows=nrows, index_col="item_id", parse_dates=["activation_date"])
testdex = testing.index
y = training.deal_probability.copy()
training.drop("deal_probability", axis=1, inplace=True)
import pickle
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns=['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns=['blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns=['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns=['whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns=['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns=['dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_pixel_width ...')
with open('../input/train_image_features_1.p', 'rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_ids = x['ids']
with open('../input/test_image_features_1.p', 'rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns=['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns=['average_pixel_width'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_reds ...')
with open('../input/train_image_features_1.p', 'rb') as f:
x = pickle.load(f)
train_average_reds = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p', 'rb') as f:
x = pickle.load(f)
test_average_reds = x['average_reds']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_average_reds, columns=['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns=['average_reds'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_blues ...')
with open('../input/train_image_features_1.p', 'rb') as f:
x = pickle.load(f)
train_average_blues = x['average_blues']
train_ids = x['ids']
with open('../input/test_image_features_1.p', 'rb') as f:
x = pickle.load(f)
test_average_blues = x['average_blues']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = | pd.DataFrame(train_average_blues, columns=['average_blues']) | pandas.DataFrame |
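# Illustrative refactor sketch (added): every block above repeats the same
# load-pickle / wrap-in-DataFrame / join-on-image steps, so a helper like this
# (hypothetical, not in the original kernel) would remove the duplication.
def join_image_feature(training, testing, train_pickle, test_pickle, key):
    import gc
    import pickle
    import pandas as pd
    frames = []
    for path in (train_pickle, test_pickle):
        with open(path, 'rb') as f:
            x = pickle.load(f)
        feature_df = pd.DataFrame(x[key], columns=[key])
        feature_df['image'] = x['ids']
        frames.append(feature_df)
        del x
        gc.collect()
    training = training.join(frames[0].set_index('image'), on='image')
    testing = testing.join(frames[1].set_index('image'), on='image')
    return training, testing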
import numpy as np
import pandas as pd
def file_combine(start,finish):
combine_df = pd.DataFrame()
for i in range(start,finish,500):
temp = pd.read_csv("~/Desktop/Chess/data/train_data{}".format(i), index_col = 0)
combine_df = combine_df.append(temp, ignore_index=True)
print (i)
return combine_df
train_data_2013 = file_combine(499,41500)
temp = pd.read_csv("~/Desktop/Chess/data/train_data41737", index_col = 0)
train_data_2013 = train_data_2013.append(temp, ignore_index = True)
def file_combine(start,finish):
combine_df = pd.DataFrame()
for i in range(start,finish,10000):
temp = pd.read_csv("~/Chess/piece_pos_data{}".format(i), index_col = 0)
combine_df = combine_df.append(temp, ignore_index=True)
print (i)
#if (i+1) % 100000 == 0:
#combine_df.to_csv("~/Desktop/Chess/data/piece_pos_checkpt")
#print("check point done")
return combine_df
piece_pos_data_2013 = file_combine(9999,3399999)
temp = pd.read_csv("~/Chess/data/piece_pos_data3406525", index_col = 0)
piece_pos_data_2013 = piece_pos_data_2013.append(temp, ignore_index = True)
def file_combine():
combine_df = | pd.DataFrame() | pandas.DataFrame |
from pyops.utils import is_elapsed_time, parse_time, getMonth
import pandas as pd
from datetime import datetime, time, timedelta
import os
from pyops.plots import modes_schedule
class ITL:
def __init__(self, fname, ref_date=None):
# Variable initialization
self.WTF = list()
self.meta = dict()
self.header = list()
self.end_time = None
self.ref_date = ref_date
self.start_time = None
self.init_values = list()
self.merged_events = None
self.include_files = list()
# Loading the given file
self.load(fname)
def load(self, fname):
# Storing the name of the file for editing purposes
self.fname = fname
# Auxiliary dictionary to speed up the data conversion into pandas
aux_dict = dict(raw_time=[], time=[], experiment=[], mode=[],
action=[], parameters=[], comment=[])
# Importing the file
out_ouf_metadata = False
with open(fname) as f:
line = ""
line_comments = list()
for l in f:
# Formatting just in case there is no space between parentheses
l = l.replace('(', ' ( ').replace(')', ' ) ')
# Concatenating lines if '\' found
if '\\' in l and '#' not in l[0] and \
'\\' not in l[l.index('\\') + 1]:
line += l[:l.index('\\')]
line_comments.append(l[l.index('\\') + 1: - 1])
# Continues with the next iteration of the loop
continue
# If there was no concatenation of lines
if len(line) == 0:
line = l
# If we were concatenating, we concatenate the last one
else:
line += l[:l.index(')') + 1]
line_comments.append(l[l.index(')'):])
if '\n' in line[0]:
pass
elif 'Comment:' in line:
pass
# Filtering lines with comments
elif '#' in line[0]:
if not out_ouf_metadata:
self.header.append(line)
self._read_metada(line)
else:
self.WTF.append(line)
# Storing events
elif len(line.split()) > 0 and \
is_elapsed_time(line.split()[0]):
aux_dict = \
self._read_events(line, aux_dict, line_comments)
# Useful data from the header
else:
# We can say we are out of the metadate here because
# start_time and end_time are mandatory in the files
out_ouf_metadata = True
self._read_header_line(line.split())
# Preparing values for next iteration
line = ""
line_comments = list()
# Closing the file
f.close()
# Creating the pandas dataframe
self.events = pd.DataFrame(aux_dict)
self.events = self.order_colums_in_dataframe(self.events)
def _read_metada(self, line):
if ': ' in line:
self.meta[line[1:line.index(': ')].strip()] = \
line[line.index(': ') + 1:-1].strip()
def _read_events(self, line, aux_dict, line_comments):
# Consecutive whitespace is regarded as a single separator
l = line.split()
# Special case of include:
# 000_22:30:00 INCLUDE "SA-SFT_FM__-ORB_LOAD-TC_-GEN01A.itl"
if 'INCLUDE' in l[1].upper():
if '#' in l:
index = l.index('#')
comment = ' '.join(l[index:])
self.include_files.append(
[l[2], l[0]] + l[3:index] + [comment])
else:
self.include_files.append([l[2], l[0]] + l[3:])
else:
# Storing comments
if '#' in line:
index = line.index('#')
aux_dict['comment'].append(line[index + 1:-1])
elif len(line_comments) > 0 and len(line_comments[0]) > 0:
index = line_comments[0].index('#')
aux_dict['comment'].append(line_comments[0][index + 1:])
else:
aux_dict['comment'].append(None)
aux_dict['raw_time'].append(l[0])
aux_dict['time'].append(self._to_datetime(l[0]))
aux_dict['experiment'].append(l[1])
# If SOC as experiment and PTR isn't the mode then there is no mode
if 'SOC' in l[1].upper() and 'PTR' not in l[2]:
aux_dict['mode'].append(None)
else:
aux_dict['mode'].append(l[2])
# If the next element in the line doesn't contain a hash, then
# there is an action
if '#' not in l[3]:
aux_dict['action'].append(l[3])
# If there are parameters we store them
if len(l) > 4:
if '(' in l[4]:
aux_dict['parameters'].append(
self._read_parameters(l[5:], line_comments[1:]))
else:
aux_dict['parameters'].append(None)
else:
aux_dict['parameters'].append(None)
else:
aux_dict['action'].append(None)
aux_dict['parameters'].append(None)
return aux_dict
def _read_parameters(self, parameters, line_comments):
output = list()
# Selecting the indexes of every '=' in the line which implies a new
# parameter exist
indexes = [i for i, val in enumerate(parameters) if val == '=']
for index in indexes:
param = list()
param.append(parameters[index - 1])
# If it is the last element we take all but the last expected ')'
if indexes[-1] == index:
last_index = -1
else:
last_index = indexes[indexes.index(index) + 1] - 1
# We avoid '=', that's why index + 1
for element in parameters[index + 1:last_index]:
param.append(element)
# Adding comments. line_comments & indexes should have same length
if indexes.index(index) + 1 <= len(line_comments):
comment = line_comments[indexes.index(index)]
if '#' in comment:
param.append((comment[comment.index('#') + 1:]))
else:
param.append(comment)
else:
param.append(None)
# Adding new parameter tuple
output.append(param)
return output
def _read_header_line(self, line):
if 'Ref_date:' in line:
# Storing them in "raw" format
self.raw_ref_time = line[1]
# Getting the reference date from the header and transforming it
# into datetime format
self.ref_date = self._ref_date_to_datetime(line[1])
elif 'Start_time:' in line:
# Storing them in "raw" format
self.raw_start_time = line[1]
# Storing them in datetime format
self.start_time = self._to_datetime(line[1])
elif 'End_time:' in line:
# Storing them in "raw" format
self.raw_end_time = line[1]
# Storing them in datetime format
self.end_time = self._to_datetime(line[1])
elif 'Propagation_delay:' in line:
self.propagation_delay = line[1:]
elif 'Init_value:' in line:
# Storing them in "raw" format
self.init_values.append(line[1:])
# Sometimes it appears as Include instead of Include_file
elif 'Include_file:' in line or 'Include:' in line:
self.include_files.append(line[1:])
def _ref_date_to_datetime(self, ref_date):
ref_date = ref_date.split('-')[0] + "-" +\
str(getMonth(ref_date.split('-')[1])) + "-" + \
ref_date.split('-')[2]
return datetime.strptime(ref_date, "%d-%m-%Y")
def _to_datetime(self, element):
if self.ref_date is None and '-' not in element:
# Only hh:mm:ss case
if len(element) == 8:
elements = [int(e) for e in element.split(':')]
return time(elements[0], elements[1], elements[2])
return parse_time(element)
else:
# Only hh:mm:ss case
if len(element) == 8:
return parse_time("000_" + element, self.ref_date)
elif '-' in element:
date = self._ref_date_to_datetime(element.split('_')[0])
return parse_time("000_" + element.split('_')[1], date)
return parse_time(element, self.ref_date)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# This method has to be adapted!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def to_file(self, fname):
# Creating file if the file doesn't exist and truncating it if exists
with open(fname, 'w') as f:
# Copying the header
[f.write(line) for line in self.header]
# Copying the useful data in the header
# Reg_date
if self.ref_date is not None:
f.write("Ref_date: " + self.raw_ref_time + "\n#\n")
# Start and End time
f.write("Start_time: " + self.raw_start_time + "\n")
f.write("End_time: " + self.raw_end_time + "\n#\n")
# Propagation delay
if self.propagation_delay is not None:
output = ""
for element in self.propagation_delay:
output += " " + element
f.write("Propagation_delay: " + output + "\n#\n")
# Init values
if len(self.init_values) > 0:
for value in self.init_values:
output = ""
for element in value:
output += " " + element
f.write("Init_value: " + output + "\n")
f.write("#\n")
# Include files
if len(self.include_files) > 0:
for include in self.include_files:
output = ""
for element in include:
output += " " + element
f.write("Include_file: " + output + "\n")
f.write("#\n")
# Copying events
f.write("# Events_in_list: " + str(len(self.events.index))
+ "\n#\n")
f.write("# Time Event\n#\n")
for index, row in self.events.iterrows():
output = row['raw_time'] + " " + row['event']
if row['experiment'] is not None:
output += " (EXP = " + row['experiment'] + " "
output += "ITEM = " + row['item'] + ")"
if row['count'] is not None:
output += " (COUNT = " + row['count'] + ")"
if row['comment'] is not None:
output += " # " + row['comment']
output += "\n"
f.write(output)
f.write("#\n")
f.close()
def order_colums_in_dataframe(self, df):
# Sorting by the time
df = df.sort_values(['time'])
# Sorting the columns in the dataframe
cols = ['raw_time', 'time', 'experiment', 'mode', 'action',
'parameters', 'comment']
return df[cols]
def check_consistency(self):
if self.start_time is not None and \
self.events['time'].min() < self.start_time:
print ("There is an time event before the official start_time")
print (self.events['time'].min() + " is before than "
+ self.start_time)
raise NameError('Events before start_time')
elif self.end_time is not None and \
self.events['time'].max() > self.end_time:
print ("There is an time event after the official end_time")
print (self.events['time'].max() + " is after than "
+ self.end_time)
raise NameError('Events after end_time')
elif self.check_if_included_files_exist_in_directory():
print ("Everything seems to be ok, congratulations! :)")
def check_if_included_files_exist_in_directory(self):
files_exist = True
# Getting the path of the directory where we are working
path = os.path.dirname(os.path.abspath(self.fname))
for fname in self.include_files:
# Removing possible problematic characters
fname = fname[0].strip('"')
if not os.path.isfile(os.path.join(path, fname)):
files_exist = False
output = "It seems as if " + fname + "is not in the same "
output += "directory as " + os.path.basename(self.fname)
print (output)
# Perhaps raising an exception here in the future...
return files_exist
def merge_includes(self):
self.merged_events = self.events
# Getting the path to load correctly the files
path = os.path.dirname(os.path.abspath(self.fname))
for f in self.include_files:
print ("Reading " + f[0] + "...")
fname = os.path.join(path, f[0].strip('"'))
# There is an existing time
if len(f) > 1 and is_elapsed_time(f[1]):
ref_date = self._to_datetime(f[1])
itl = ITL(fname, ref_date=ref_date)
else:
itl = ITL(fname)
itl.check_consistency()
# Recursing over the itl files
itl.merge_includes()
# Merging the dataframes
self.merged_events = \
pd.concat([self.merged_events, itl.merged_events],
ignore_index=True)
self.merged_events = self.order_colums_in_dataframe(self.merged_events)
def plot(self):
# If the includes are still not merged, we merge them
if self.merged_events is None:
self.merge_includes()
df = self._convert_df_to_mode_plot_table_format(
self.merged_events, 'mode')
df = df.set_index(['time'])
df.index.names = ['Time']
modes_schedule(df)
def _convert_df_to_mode_plot_table_format(self, df, attribute):
df = df[['time', 'experiment', attribute]]
experiments_unique = df['experiment'].unique()
# Initializating the output table
# We create a new dictionary and convert it to a df because working
# with the dataframe has a high computational cost
output = dict()
for exp in experiments_unique:
output[exp] = list()
# Adding the times
output['time'] = pd.to_datetime(df['time'].values).tolist()
experiments = df['experiment'].values.tolist()
modes = df[attribute].values.tolist()
# Creating the new table
for experiment, mode in zip(experiments, modes):
for exp in experiments_unique:
if exp == experiment:
output[exp].append(mode)
else:
output[exp].append(None)
# Merging entries with the same time
pos_to_delete = list()
for pos in range(len(output['time']) - 1):
if output['time'][pos + 1] == output['time'][pos]:
for exp in experiments_unique:
if output[exp][pos] is not None and \
output[exp][pos + 1] is None:
output[exp][pos + 1] = output[exp][pos]
pos_to_delete.append(pos)
# Starting from the end to avoid keys shifting
pos_to_delete.reverse()
for pos in pos_to_delete:
for key in output:
del output[key][pos]
out_df = pd.DataFrame(output)
# Filling the NaN fields with the last not NaN value in the column
out_df.fillna(method='ffill', inplace=True)
return out_df
def shift_time(df, rows=None, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
delta = timedelta(days=days, seconds=seconds, minutes=minutes,
microseconds=microseconds, milliseconds=milliseconds,
hours=hours, weeks=weeks)
if rows is None:
for times in pd.to_datetime(df['time'].unique()).tolist():
df.loc[df['time'] == times, 'time'] = times + delta
else:
for row in rows:
df.loc[row, 'time'] = | pd.to_datetime(df.loc[row, 'time']) | pandas.to_datetime |
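# Illustrative usage (added; the file name is taken from the include example above):
#     itl = ITL("SA-SFT_FM__-ORB_LOAD-TC_-GEN01A.itl")
#     shift_time(itl.events, hours=2)               # shift every event by two hours
#     shift_time(itl.events, rows=[0, 1], days=1)   # shift only the first two rows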
#!/usr/bin/env python3
"""
Script to create Figure 2 of the paper.
"""
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from utils import load_dataset
PROJECT_ROOT = Path.cwd()
def main():
"""Create elements for figure 2 of the paper"""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
model_name = 'supervised_aae'
outputs_dir = PROJECT_ROOT / 'outputs'
bootstrap_dir = outputs_dir / 'bootstrap_analysis'
model_dir = bootstrap_dir / model_name
# ----------------------------------------------------------------------------
dataset_name = 'ADNI'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
adni_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_adni_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
error_hc = reconstruction_error_df.loc[adni_df['Diagn'] == 1]['Reconstruction error']
error_emci = reconstruction_error_df.loc[adni_df['Diagn'] == 27]['Reconstruction error']
error_lmci = reconstruction_error_df.loc[adni_df['Diagn'] == 28]['Reconstruction error']
error_ad = reconstruction_error_df.loc[adni_df['Diagn'] == 17]['Reconstruction error']
mean_adni_list.append([error_hc.mean(), error_emci.mean(), error_lmci.mean(), error_ad.mean()])
mean_adni_list = np.array(mean_adni_list)
plt.hlines(range(4),
np.percentile(mean_adni_list, 2.5, axis=0),
np.percentile(mean_adni_list, 97.5, axis=0))
plt.plot(np.mean(mean_adni_list, axis=0), range(4), 's', color='k')
plt.savefig(bootstrap_dir / 'ADNI.eps', format='eps')
plt.close()
plt.clf()
results = pd.DataFrame(columns={'Measure', 'HC', 'EMCI', 'LMCI', 'AD'})
results = results.append({'Measure': 'Mean',
'HC': np.mean(mean_adni_list, axis=0)[0],
'EMCI': np.mean(mean_adni_list, axis=0)[1],
'LMCI': np.mean(mean_adni_list, axis=0)[2],
'AD': np.mean(mean_adni_list, axis=0)[3], }, ignore_index=True)
results = results.append({'Measure': 'Lower',
'HC': np.percentile(mean_adni_list, 2.5, axis=0)[0],
'EMCI': np.percentile(mean_adni_list, 2.5, axis=0)[1],
'LMCI': np.percentile(mean_adni_list, 2.5, axis=0)[2],
'AD': np.percentile(mean_adni_list, 2.5, axis=0)[3], }, ignore_index=True)
results = results.append({'Measure': 'Upper',
'HC': np.percentile(mean_adni_list, 97.5, axis=0)[0],
'EMCI': np.percentile(mean_adni_list, 97.5, axis=0)[1],
'LMCI': np.percentile(mean_adni_list, 97.5, axis=0)[2],
'AD': np.percentile(mean_adni_list, 97.5, axis=0)[3], }, ignore_index=True)
results.to_csv(bootstrap_dir / dataset_name / 'deviations.csv', index=False)
# ----------------------------------------------------------------------------
dataset_name = 'AIBL'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
brescia_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_brescia_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
error_hc = reconstruction_error_df.loc[brescia_df['Diagn'] == 1]['Reconstruction error']
error_mci = reconstruction_error_df.loc[brescia_df['Diagn'] == 18]['Reconstruction error']
error_ad = reconstruction_error_df.loc[brescia_df['Diagn'] == 17]['Reconstruction error']
mean_brescia_list.append([error_hc.mean(), error_mci.mean(), error_ad.mean()])
mean_brescia_list = np.array(mean_brescia_list)
plt.hlines(range(3),
np.percentile(mean_brescia_list, 2.5, axis=0),
np.percentile(mean_brescia_list, 97.5, axis=0))
plt.plot(np.mean(mean_brescia_list, axis=0), range(3), 's', color='k')
plt.savefig(bootstrap_dir / 'AIBL.eps', format='eps')
plt.close()
plt.clf()
results = pd.DataFrame(columns={'Measure', 'HC', 'MCI', 'AD'})
results = results.append({'Measure': 'Mean',
'HC': np.mean(mean_brescia_list, axis=0)[0],
'MCI': np.mean(mean_brescia_list, axis=0)[1],
'AD': np.mean(mean_brescia_list, axis=0)[2], }, ignore_index=True)
results = results.append({'Measure': 'Lower',
'HC': np.percentile(mean_brescia_list, 2.5, axis=0)[0],
'MCI': np.percentile(mean_brescia_list, 2.5, axis=0)[1],
'AD': np.percentile(mean_brescia_list, 2.5, axis=0)[2], }, ignore_index=True)
results = results.append({'Measure': 'Upper',
'HC': np.percentile(mean_brescia_list, 97.5, axis=0)[0],
'MCI': np.percentile(mean_brescia_list, 97.5, axis=0)[1],
'AD': np.percentile(mean_brescia_list, 97.5, axis=0)[2], }, ignore_index=True)
results.to_csv(bootstrap_dir / dataset_name / 'deviations.csv', index=False)
# ----------------------------------------------------------------------------
dataset_name = 'TOMC'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
brescia_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_brescia_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
error_hc = reconstruction_error_df.loc[brescia_df['Diagn'] == 1]['Reconstruction error']
error_mci = reconstruction_error_df.loc[brescia_df['Diagn'] == 18]['Reconstruction error']
error_ad = reconstruction_error_df.loc[brescia_df['Diagn'] == 17]['Reconstruction error']
mean_brescia_list.append([error_hc.mean(), error_mci.mean(), error_ad.mean()])
mean_brescia_list = np.array(mean_brescia_list)
plt.hlines(range(3),
np.percentile(mean_brescia_list, 2.5, axis=0),
np.percentile(mean_brescia_list, 97.5, axis=0))
plt.plot(np.mean(mean_brescia_list, axis=0), range(3), 's', color='k')
plt.savefig(bootstrap_dir / 'TOMC.eps', format='eps')
plt.close()
plt.clf()
results = pd.DataFrame(columns={'Measure', 'HC', 'MCI', 'AD'})
results = results.append({'Measure': 'Mean',
'HC': np.mean(mean_brescia_list, axis=0)[0],
'MCI': np.mean(mean_brescia_list, axis=0)[1],
'AD': np.mean(mean_brescia_list, axis=0)[2], }, ignore_index=True)
results = results.append({'Measure': 'Lower',
'HC': np.percentile(mean_brescia_list, 2.5, axis=0)[0],
'MCI': np.percentile(mean_brescia_list, 2.5, axis=0)[1],
'AD': np.percentile(mean_brescia_list, 2.5, axis=0)[2], }, ignore_index=True)
results = results.append({'Measure': 'Upper',
'HC': np.percentile(mean_brescia_list, 97.5, axis=0)[0],
'MCI': np.percentile(mean_brescia_list, 97.5, axis=0)[1],
'AD': np.percentile(mean_brescia_list, 97.5, axis=0)[2], }, ignore_index=True)
results.to_csv(bootstrap_dir / dataset_name / 'deviations.csv', index=False)
# ----------------------------------------------------------------------------
dataset_name = 'OASIS1'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
oasis1_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_oasis1_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
error_hc = reconstruction_error_df.loc[oasis1_df['Diagn'] == 1]['Reconstruction error']
error_ad = reconstruction_error_df.loc[oasis1_df['Diagn'] == 17]['Reconstruction error']
mean_oasis1_list.append([error_hc.mean(), error_ad.mean()])
mean_oasis1_list = np.array(mean_oasis1_list)
plt.hlines(range(2),
np.percentile(mean_oasis1_list, 2.5, axis=0),
np.percentile(mean_oasis1_list, 97.5, axis=0))
plt.plot(np.mean(mean_oasis1_list, axis=0), range(2), 's', color='k')
plt.savefig(bootstrap_dir / 'OASIS1.eps', format='eps')
plt.close()
plt.clf()
results = pd.DataFrame(columns=['Measure', 'HC', 'AD'])  # list keeps a deterministic column order
results = results.append({'Measure': 'Mean',
'HC': np.mean(mean_oasis1_list, axis=0)[0],
'AD': np.mean(mean_oasis1_list, axis=0)[1], }, ignore_index=True)
results = results.append({'Measure': 'Lower',
'HC': np.percentile(mean_oasis1_list, 2.5, axis=0)[0],
'AD': np.percentile(mean_oasis1_list, 2.5, axis=0)[1], }, ignore_index=True)
results = results.append({'Measure': 'Upper',
'HC': np.percentile(mean_oasis1_list, 97.5, axis=0)[0],
'AD': np.percentile(mean_oasis1_list, 97.5, axis=0)[1], }, ignore_index=True)
results.to_csv(bootstrap_dir / dataset_name / 'deviations.csv', index=False)
# ----------------------------------------------------------------------------
dataset_name = 'MIRIAD'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
miriad_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_miriad_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
    bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
    output_dataset_dir = bootstrap_model_dir / dataset_name
    output_dataset_dir.mkdir(exist_ok=True)
    reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
    error_hc = reconstruction_error_df.loc[miriad_df['Diagn'] == 1]['Reconstruction error']
    error_ad = reconstruction_error_df.loc[miriad_df['Diagn'] == 17]['Reconstruction error']
    mean_miriad_list.append([error_hc.mean(), error_ad.mean()])
mean_miriad_list = np.array(mean_miriad_list)
plt.hlines(range(2),
           np.percentile(mean_miriad_list, 2.5, axis=0),
           np.percentile(mean_miriad_list, 97.5, axis=0))
plt.plot(np.mean(mean_miriad_list, axis=0), range(2), 's', color='k')
plt.savefig(bootstrap_dir / 'MIRIAD.eps', format='eps')
plt.close()
plt.clf()
results = | pd.DataFrame(columns={'Measure', 'HC', 'AD'}) | pandas.DataFrame |
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
allrawdataHCAorBoth=pd.concat([mghcleandata,washuacleandata,umnacleandata,uclaacleandata],axis=0)
allrawdataHCD=pd.concat([harvcleandata,washudcleandata,umndcleandata,ucladcleandata],axis=0)
harvcleanscore=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleanscore=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleanscore=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleanscore=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleanscore=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleanscore=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleanscore=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleanscore=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
allscoresHCAorBoth=pd.concat([mghcleanscore,washuacleanscore,umnacleanscore,uclaacleanscore],axis=0)
allscoresHCD=pd.concat([harvcleanscore,washudcleanscore,umndcleanscore,ucladcleanscore],axis=0)
#make csv
allrawdataHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allrawdataHCD.to_csv(box_temp+'/HCD_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allscoresHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
allscoresHCD.to_csv(box_temp+'/HCD_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
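# The helper below does the per-site cleaning used above: corrected files are merged over the
# older curated ones (overlapping PINs are taken from the corrected set), PINs missing from
# either the raw or the scored data are set aside, PIN/instrument duplications that are not
# identical are set aside, identical duplicates are dropped, and subjects without a visit code
# or without a REDCap record are removed; every excluded ID is exported with its reason to the
# site's needs_attention folder. Note the definitions appear after the calls above, so when
# running this file top to bottom the function definitions must be executed first.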
def curatedandcorrected(curatedfolderid,needsattnfolder):
harvardfiles, harvardfolders=foldercontents(curatedfolderid)
#dont grab files that need attention
harvardfolders=harvardfolders.loc[~(harvardfolders.foldername.str.contains('needs_attention'))]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfolders.foldername,harvardfolders.folder_id)
harvardfiles=pd.concat([harvardfiles,harvardfiles2],axis=0,sort=True)
data4process=harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE')==True]
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
#trick the catcontents macro to create catable dataset, but dont actually cat until you remove the
#PINS in the corrected file from the curated file
#step1 - separate data4process/scores4process into corrected and old curated data
cdata=data4process.loc[data4process.filename.str.contains('corrected')]
cscores=scores4process.loc[scores4process.filename.str.contains('corrected')]
olddata=data4process.loc[~(data4process.filename.str.contains('corrected'))]
oldscores=scores4process.loc[~(scores4process.filename.str.contains('corrected'))]
#create catable dataset for corrected data
hdatainitcorr=catcontents(cdata,box_temp)
hscoreinitcorr=catcontents(cscores,box_temp)
#get list of ids in this corrected data #60 for Harvard
corrl=findpairs(hdatainitcorr,hscoreinitcorr) #this is the list of ids in both scored and raw corrected data
#create catable dataset for old curated data
hdatainitold=catcontents(olddata,box_temp)
hscoreinitold=catcontents(oldscores,box_temp)
#remove the data with PINS from corrected
hdatainitoldsub=hdatainitold[~(hdatainitold.PIN.isin(corrl))]
hscoreinitoldsub=hscoreinitold[~(hscoreinitold.PIN.isin(corrl))]
#now cat the two datasets together
hdatainit=pd.concat([hdatainitcorr,hdatainitoldsub],axis=0,sort=True) #these have 60 more unique pins than before...good
hscoreinit=pd.concat([hscoreinitcorr,hscoreinitoldsub],axis=0,sort=True) #these have 60 more than before...good
l=findpairs(hdatainit,hscoreinit) #this is the list of ids in both scored and raw data
    #set aside those who aren't in both and those that are in dlist or slist
notbothdatalist=hdatainit[~(hdatainit.PIN.isin(l))]
notbothscorelist=hscoreinit[~(hscoreinit.PIN.isin(l))]
nbs=list(notbothscorelist.PIN.unique())
nbd=list(notbothdatalist.PIN.unique())
hdatainit2=hdatainit[hdatainit.PIN.isin(l)]
hscoreinit2=hscoreinit[hscoreinit.PIN.isin(l)]
#check that this is same as above -- it is
#hdatainit2qc=hdatainit[~(hdatainit.PIN.isin(nbs+nbd))]
#hscoreinit2qc=hscoreinit[~(hscoreinit.PIN.isin(nbs+nbd))]
#find instrument duplications that are not identical
dlist,slist=findwierdos(hdatainit2,hscoreinit2)
dslist=pd.concat([dlist,slist],axis=0)
wierdlist=list(dslist.PIN.unique())
#set aside those who are in the wierdlist
nonidenticaldupdata=hdatainit2.loc[hdatainit2.PIN.isin(wierdlist)]
nonidenticaldupscore=hscoreinit2.loc[hscoreinit2.PIN.isin(wierdlist)]
wierdd=list(dlist.PIN.unique())
wierds=list(slist.PIN.unique())
#so we have the notinboth lists and the wierdlists
#Already set aside the notinbothlists
#if we exclude any wierdlist PINs from both, this should get rid of everything that isnt one-to-one
hdatainit3=hdatainit2.loc[~(hdatainit2.PIN.isin(wierdlist))]
hscoreinit3=hscoreinit2.loc[~(hscoreinit2.PIN.isin(wierdlist))]
#both have 580 unique ids - make them into a list
l3=findpairs(hdatainit3,hscoreinit3) #this is the list of ids in both scored and raw data
dlist,slist=findwierdos(hdatainit3,hscoreinit3)
#now delete any identical duplicates check for issues finding wierdos
if dlist.empty and slist.empty:
hdatainit3=hdatainit3.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
hscoreinit3=hscoreinit3.drop_duplicates(subset={'PIN','Inst'})
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
#export scores and data for all pins in dslist or nbs or nbd with flags
notbothdatalist.to_csv(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv')
notbothscorelist.to_csv(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv',needsattnfolder)
nonidenticaldupdata.to_csv(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv')
nonidenticaldupscore.to_csv(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv',needsattnfolder)
#last but not least...set aside ids not in REDCap, and IDs that need visit numbers
#get reds from hdatatinit3 (should be same as list from hscoreinit3)
#generate hdatainit4 and hscoreinit4 which is relieved of these ids
hdatainit4=subjectsvisits(hdatainit3)
hscoreinit4=subjectsvisits(hscoreinit3)
mv=hscoreinit4.loc[~(hscoreinit4.visit.isin(['V1','V2','V3','X1','X2','X3']))].copy()
mvs=list(mv.subject.unique()) #list of PINs without visit numbers
check=subjectpairs(hdatainit4,hscoreinit4) #this number will be fewer because V1 and V2 PINs for same subject only counted once)
redids=box.getredcapids()
dfcheck=pd.DataFrame(check,columns=['subject'])
boxids=pd.merge(dfcheck,redids,how='left',on='subject',indicator=True)
reds=list(boxids.loc[boxids._merge=='left_only'].subject) #subjects not in redcap
boxandredcap=boxids.loc[boxids._merge=='both'].subject
    #export the otherwise cleanest data ready for snapshotting as the new updated curated file -- then run this for all sites before concatenating across sites
#write code here - has only ids with visit numbers and one to one scores and data correspondence and no wierd duplications
#but check one last time that hdatainit5 and hscoreinit5 is super clean
hdatainit5=hdatainit4.loc[~(hdatainit4.subject.isin(mvs+reds))]
hscoreinit5=hscoreinit4.loc[~(hscoreinit4.subject.isin(mvs+reds))]
#export the lists of ids and reasons they were excluded
df=pd.DataFrame(columns=['reason','affectedIDs'])
df=df.append({'reason': 'PIN In Scores but not Data', 'affectedIDs': nbs}, ignore_index=True)
df=df.append({'reason': 'PIN In Data but not Scores', 'affectedIDs': nbd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Data', 'affectedIDs': wierdd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Scores', 'affectedIDs': wierds}, ignore_index=True)
df=df.append({'reason': 'PIN/subject in Scores and Data but missing visit', 'affectedIDs': mvs}, ignore_index=True)
df=df.append({'reason': 'subject in Scores and Data but not REDCap ', 'affectedIDs': reds}, ignore_index=True)
df.to_csv(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv',needsattnfolder)
return hdatainit5,hscoreinit5
#get subject and visit from a PIN in a dataframe
def subjectsvisits(hdatainit3):
hdatainit3['subject']=hdatainit3.PIN.str.strip().str[:10]
hdatainit3['visit']=''
hdatainit3.loc[hdatainit3.PIN.str.contains('v1',case=False),'visit']='V1'
hdatainit3.loc[hdatainit3.PIN.str.contains('v2',case=False),'visit']='V2'
hdatainit3.loc[hdatainit3.PIN.str.contains('v3',case=False),'visit']='V3'
hdatainit3.loc[hdatainit3.PIN.str.contains('x1',case=False),'visit']='X1'
hdatainit3.loc[hdatainit3.PIN.str.contains('x2',case=False),'visit']='X2'
hdatainit3.loc[hdatainit3.PIN.str.contains('x3',case=False),'visit']='X3'
return hdatainit3
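# Example (hypothetical PIN): 'HCD0001234_V1' gives subject 'HCD0001234' (first 10 characters)
# and visit 'V1'; PINs without a v1/v2/v3/x1/x2/x3 token keep an empty visit string and are
# filtered out later as "missing visit".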
#pull id visit combos that arent in both scores and data files
def findpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.PIN.unique():
if i in hdatainit.PIN.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following PINs in scores but not data:')
print(i)
for i in hdatainit.PIN.unique():
if i in hscoreinit.PIN.unique():
pass
else:
print('the following PINs in data but not scores:')
print(i)
return pinsinboth
def subjectpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.subject.unique():
if i in hdatainit.subject.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following subjects in scores but not data:')
print(i)
for i in hdatainit.subject.unique():
if i in hscoreinit.subject.unique():
pass
else:
            print('the following subjects in data but not scores:')
print(i)
return pinsinboth
def findwierdos(hdatainit,hscoreinit):
#compare the two types of sort to identify which files have non-identical duplications
sort1data=hdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
sort1score=hscoreinit.drop_duplicates(subset={'PIN','Inst'})
sort2data=hdatainit.drop_duplicates(subset=set(hdatainit.columns).difference({'filename','file_id'}))
sort2score=hscoreinit.drop_duplicates(subset=set(hscoreinit.columns).difference({'filename','file_id'}))
s1d=sort1data.groupby('PIN').count()
s2d=sort2data.groupby('PIN').count()
databoth=pd.merge(s1d.reset_index()[['PIN','DeviceID']], s2d.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_data=databoth.loc[databoth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
s1s=sort1score.groupby('PIN').count()
s2s=sort2score.groupby('PIN').count()
scoreboth=pd.merge(s1s.reset_index()[['PIN','DeviceID']], s2s.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_score=scoreboth.loc[scoreboth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
return wierd_data,wierd_score
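# findwierdos flags PIN/instrument duplications that are not byte-identical: the data are
# deduplicated once on the key columns only (PIN/Inst/ItemID/Position for raw, PIN/Inst for
# scores) and once on every column except filename/file_id; any PIN whose row count differs
# between the two dedups must carry conflicting duplicate records and is returned for review.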
def catcontents(files,cache_space): #dataframe that has filename and file_id as columns
scoresfiles=files.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.filename:
filepath=os.path.join(cache_space,i)
filenum=scoresfiles.loc[scoresfiles.filename==i,'file_id']
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']=i
temp['file_id']=pd.Series(int(filenum.values[0]),index=temp.index)
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
        except Exception:
print(filepath+' wouldnt import')
temp=pd.DataFrame()
temp['filename']=pd.Series(i,index=[0])
temp['file_id']=pd.Series(int(filenum.values[0]),index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
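# catcontents expects a dataframe with 'filename' and 'file_id' columns, reads each CSV from
# cache_space, tags every row with its source file, file_id and the snapshot date, and stacks
# the results; a file that fails to import still contributes one tagged row so it stays traceable.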
def catfromlocal(endpoint_temp,scores2cat): #dataframe that has filenames
scoresfiles=scores2cat.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.fname:
filepath=os.path.join(endpoint_temp,i)
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']="endpointmachine/"+i
temp['raw_cat_date']=snapshotdate
scoresinit= | pd.concat([scoresinit,temp],axis=0,sort=False) | pandas.concat |
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from models.wideresnet import *
from models.resnet import *
from models.small_cnn import *
from models.net_mnist import *
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from datetime import datetime
import pandas as pd
import yaml
import input_add_noise as attacks
#--row_max 20 for MNIST because there are at most 20 adversarial misclassifications for MNIST
#--row_max 100 for CIFAR to keep the figure a reasonable size
#CLEAN is without --adv
#Generate PGD data on TRADES: python pgd_attack.py --dataset XXX --gen --adv
#Generate PGD data on CLEAN: python pgd_attack.py --dataset XXX --gen
#df on TRADES: python pgd_attack.py --dataset XXX --diff --var --row_max XXX --adv
#heat on TRADES: python pgd_attack.py --dataset [cifar10,mnist] --diff --heat --row_max [100,20] --adv --attack [rand,pgd]
parser = argparse.ArgumentParser(description='PyTorch CIFAR PGD Attack Evaluation')
parser.add_argument('--test-batch-size', type=int, default=200, metavar='N',
help='input batch size for testing (default: 200)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031,
help='perturbation')
parser.add_argument('--num-steps', default=20,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.003,
help='perturb step size')
parser.add_argument('--random',
default=True,
help='random initialization for PGD')
parser.add_argument('--white-box-attack', action='store_true', default=False,
help='whether perform white-box attack')
parser.add_argument('--gen', action='store_true', default=False,
help='generate dataset')
parser.add_argument('--diff', action='store_true', default=False,
help='computing diff in activation')
parser.add_argument('--heat', action='store_true', default=False,
help='plot heat map')
parser.add_argument('--var', action='store_true', default=False,
help='plot variance graph')
parser.add_argument('--bar', action='store_true', default=False,
help='plot bar chart')
parser.add_argument('--dataset', required=True,
help='which dataset')
parser.add_argument('--row_max',
                    help='maximum number of misclassified examples to collect for the plots')
parser.add_argument('--adv', action='store_true', default=False,
help='load adv model or clean model')
parser.add_argument('--attack', required=True,
help='which attack')
args = parser.parse_args()
attack = args.attack
is_black = "white" if args.white_box_attack else "black"
adv = "adv" if args.adv else "clean"
# settings
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# set up data loader
# if args.rand:
# transform_test = transforms.Compose([transforms.ToTensor(), attacks.AddGaussianNoise(mean, sd)]])
# else:
transform_test = transforms.Compose([transforms.ToTensor(),])
testset = eval("torchvision.datasets." + args.dataset.upper() + "(root='../data', train=False, download=True, transform=transform_test)")
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
if not args.gen:
if args.attack == "rand":
transform_test = transforms.Compose([transforms.ToTensor(), attacks.AddGaussianNoise(0.0, 0.3)])
advset = eval("torchvision.datasets." + args.dataset.upper() + "(root='../data', train=False, download=True, transform=transform_test)")
elif args.attack == "pgd":
        advset = torch.load("data_attack/" + args.dataset + "_" + is_black + "_" + adv + ".pt")
else:
assert False, "invalid attack"
adv_loader = torch.utils.data.DataLoader(advset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
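# The helpers below implement standard PGD: start from a random point in the L-inf epsilon-ball
# around X, repeatedly step by step_size along the sign of the cross-entropy gradient, project
# back into the epsilon-ball (clamp of X_pgd - X), and clamp pixels to [0, 1]. The black-box
# variants craft the perturbation on model_source and evaluate it on model_target (a transfer attack).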
def _pgd_whitebox(model,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum()
print('err pgd (white-box): ', err_pgd)
return err, err_pgd
def _pgd_blackbox(model_target,
model_source,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model_target(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model_source(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
err_pgd = (model_target(X_pgd).data.max(1)[1] != y.data).float().sum()
print('err pgd black-box: ', err_pgd)
return err, err_pgd
def _pgd_blackbox_gen(model_target,
model_source,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model_target(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(int(num_steps)):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model_source(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
return X_pgd
def eval_adv_test_whitebox(model, device, test_loader):
"""
evaluate model by white-box attack
"""
model.eval()
robust_err_total = 0
natural_err_total = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
err_natural, err_robust = _pgd_whitebox(model, X, y)
robust_err_total += err_robust
natural_err_total += err_natural
print('natural_err_total: ', natural_err_total)
print('robust_err_total: ', robust_err_total)
#plot graph and compute divergence
def eval_adv_test_blackbox(model_target, model_source, device, test_loader):
"""
    evaluate model by black-box attack
"""
model_target.eval()
model_source.eval()
robust_err_total = 0
natural_err_total = 0
if args.gen:
all_datasets = []
print("Start generating data")
for data, target in test_loader:
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
X_pgd = _pgd_blackbox_gen(model_target, model_source, X, y)
all_datasets.append(torch.utils.data.TensorDataset(X_pgd, y))
# break
final_dataset = torch.utils.data.ConcatDataset(all_datasets)
torch.save(final_dataset, "data_attack/" + args.dataset + "_" + is_black + "_" + adv + ".pt")
elif args.diff:
print("start computing differences")
dateTimeObj = datetime.now()
date_name = str(dateTimeObj).replace(' ', '-').replace(':', '-').replace('.', '-')
# print("advset[0]: " + str(advset[0]))
# return
heat_dict = {}
row_max = int(args.row_max)
row_count = 0
act_list_len = 0
print("len(advset): " + str(len(advset)))
print("advset: " + str(advset))
for i in range(len(advset)):
# print("i is: " + str(i))
d_adv, t_adv = advset[i][0].to(device), torch.tensor(advset[i][1]).to(device)
# pgd attack
X_adv, y_adv = Variable(d_adv), Variable(t_adv)
out_adv, act_list_adv = model_target.forward_act(X_adv.unsqueeze(0))
corr_adv = out_adv.data.max(1)[1] == y_adv.data
# print("y test: " + str(target_test))
if not corr_adv:
d_test, t_test = testset[i][0].to(device), torch.tensor(testset[i][1]).to(device)
# pgd attack
X_test, y_test = Variable(d_test), Variable(t_test)
out_test, act_list_test = model_target.forward_act(X_test.unsqueeze(0))
corr_test = out_test.data.max(1)[1] == y_test.data
if corr_test:
print("now in row: " + str(row_count))
act_list_len = len(act_list_test)
for j in range(act_list_len):
sub = torch.absolute(torch.subtract(act_list_test[j],act_list_adv[j]))
flatten_np = sub.cpu().data.numpy().flatten()
if not j in heat_dict:
heat_dict[j] = [flatten_np]
else:
heat_dict[j].append(flatten_np)
row_count = row_count + 1
if row_count == row_max:
if args.heat:
print("plotting heat map")
for j in range(act_list_len):
fig, ax = plt.subplots()
# plt.imshow(np.reshape(flatten_np, (-1, len(flatten_np))), cmap='hot', interpolation='nearest')
ax = sns.heatmap(heat_dict[j])
plt.savefig("fig/" + args.dataset + "_" + is_black + "_" + attack + str(j) + "," + str(row_max) + "," + date_name[:-7] + ".pdf")
if args.var:
for j in range(1):
# for j in range(act_list_len):
print("plotting "+ str(j))
df = pd.DataFrame(heat_dict[j])
if args.bar:
df_var = df.var().to_frame()
fig, ax = plt.subplots()
ax = df_var.hist(bins=12)
plt.savefig("fig/bin_" + args.dataset + "_10_" + str(j) + "," + str(row_max) + "," + date_name[:-7] + ".pdf")
else:
print("combing df and comparing ranks")
all_df_rank = []
all_df_var = []
h_rows = np.linspace(row_max/5, row_max, 5, dtype=int)
for h_row in h_rows:
df_var = df.head(h_row).var().to_frame()
df_var.columns = ['a_' + str(h_row)]
df_rank = df_var.rank()
df_rank.columns = ['a_' + str(h_row)]
# df_var.sort_values(by=['a_' + str(h_row)], ascending=False)
all_df_var.append(df_var)
all_df_rank.append(df_rank)
show_df_rank = pd.concat(all_df_rank,axis=1)
show_df_var = | pd.concat(all_df_var, axis=1) | pandas.concat |
from datetime import datetime
import pandas as pd
from bs4 import BeautifulSoup
from requests import request
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
header = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36'
}
def get_morningstar_index():
response = request('GET', 'http://cn.morningstar.com/index/default.aspx', headers=header)
response.encoding = 'UTF-8'
html = response.text
soup = BeautifulSoup(html, 'lxml')
table = soup.table
table_trs = table.find_all("tr")
table_ths = table.find_all('th')
data_columns = []
for table_th in table_ths:
data_columns.append(table_th.get_text())
table_data = []
for table_tr in table_trs:
table_tds = table_tr.find_all('td')
if table_tds:
table_text = []
for table_td in table_tds:
table_text.append(table_td.get_text().replace(',', ''))
table_data.append(table_text)
db_data_columns=['region', 'category', 'name', 'close', 'change', 'percent', 'date']
morningstar_index = | pd.DataFrame(data=table_data, columns=data_columns) | pandas.DataFrame |
import csv
import libsbml
import rr_cache
import tempfile
import numpy as np
import pandas as pd
from copy import deepcopy
from logging import (
Logger,
getLogger
)
from os import (
path as os_path,
remove
)
from tempfile import NamedTemporaryFile
from typing import (
Dict,
Iterable,
)
from cobra import io as cobra_io
from cobra.io.sbml import _f_reaction
from cobra.medium.annotations import (
excludes,
sbo_terms
)
from rptools.rplibs.rpCompound import rpCompound
from rptools.rplibs.rpSBML import rpSBML
from rptools.rpfba import medium
from rptools.rpfba.medium import (
build_minimal_medium,
is_df_medium_defined,
load_medium_file,
read_medium_ids,
load_compounds,
create_rp_compound,
crossref_medium_id,
merge_medium,
merge_medium_exchange,
df_to_medium,
add_missing_specie
)
from main_rpfba import Main_rpfba
class Test_medium(Main_rpfba):
# TODO: import directly from module
__MEDIUM_DEFAULT_ID = 'not_predefined_model'
__MEDIUM_HEADER_NAME = 'medium_name'
__MEDIUM_HEADER_COMPOUND_ID = 'compound_id'
__MEDIUM_HEADER_BOUND = 'upper_bound'
__MEDIUM_HEADER_OPTIONAL = ['compound_annotation', 'compound_group']
__MEDIUM_HEADER = __MEDIUM_HEADER_OPTIONAL + [__MEDIUM_HEADER_BOUND, __MEDIUM_HEADER_COMPOUND_ID, __MEDIUM_HEADER_NAME]
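    # Expected medium CSV columns (order free): medium_name, compound_id, upper_bound, plus the
    # optional compound_annotation and compound_group; medium_name carries values such as 'm9'
    # or 'lb' used by the tests below.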
    def load_medium_file(self, filename):
        # Local helper mirroring medium.load_medium_file: read the CSV and attach rpCompound objects.
        medium = pd.read_csv(filename)
        return create_rp_compound(
            df=medium,
            logger=self.logger
        )
def setUp(self):
super().setUp()
# self.logger.setLevel('DEBUG')
# objects below have to be created for each test instance
# since some tests can modified them
def test_is_df_medium_defined(self):
# Return type
self.assertTrue(isinstance(is_df_medium_defined(None), bool))
# Values
self.assertFalse(is_df_medium_defined(None))
self.assertFalse(is_df_medium_defined(np.nan))
self.assertFalse(is_df_medium_defined(pd.DataFrame()))
self.assertFalse(is_df_medium_defined(pd.DataFrame(columns=['a'])))
self.assertFalse(is_df_medium_defined(pd.DataFrame(index=[0])))
self.assertTrue(is_df_medium_defined(pd.DataFrame(data=[1], columns=['a'], index=[0])))
def test_load_medium_file(self):
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.tsv'))
# Return type
self.assertTrue(isinstance(df, pd.DataFrame))
# Basic io profile
self.assertTrue(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.d.csv'))
self.assertFalse(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.xlsx'))
self.assertFalse(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.csv'))
self.assertTrue(is_df_medium_defined(df))
# Type expected
self.assertTrue(pd.api.types.is_float_dtype(df[self.__MEDIUM_HEADER_BOUND]))
self.assertEqual(
sum(
df['rp_compound'].apply(lambda x: isinstance(x, rpCompound) or pd.isna(x))
),
len(df['rp_compound'])
)
# Challenge on column labels
df_columns = df.columns.tolist()
df_columns.remove('rp_compound')
self.assertEqual(
sorted(df_columns),
sorted(self.__MEDIUM_HEADER)
)
tmp_file = tempfile.NamedTemporaryFile(
suffix='.csv',
dir=self.temp_d,
delete=False
)
for ix in range(len(self.__MEDIUM_HEADER)):
tmp_header = deepcopy(self.__MEDIUM_HEADER)
tmp_header = tmp_header.pop(ix)
df_tmp = df[tmp_header]
df_tmp.to_csv(
tmp_file.name,
index=False
)
df_tmp = load_medium_file(tmp_file.name)
self.assertFalse(is_df_medium_defined(df_tmp))
tmp_file.close()
remove(tmp_file.name)
def test_read_medium_ids(self):
ids = read_medium_ids(os_path.join(self.medium_path, 'medium.io.b.csv'))
# Return type.
self.assertTrue(ids, Iterable)
# Values.
self.assertEqual(
sorted([x for x in ids if not pd.isna(x)]),
sorted(['m9', 'lb', 'lc'])
)
def test_load_compounds(self):
df = pd.read_csv(os_path.join(self.medium_path, 'medium.io.c.csv'))
# Return type.
dfs = load_compounds(
self.__MEDIUM_DEFAULT_ID,
os_path.join(self.medium_path, 'medium.io.c.csv'),
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertTrue(isinstance(dfs, Iterable))
self.assertEqual(len(dfs), 2)
self.assertTrue(isinstance(dfs[0], pd.DataFrame))
self.assertTrue(isinstance(dfs[1], pd.DataFrame))
# Base.
df_base, df_user = load_compounds(
self.__MEDIUM_DEFAULT_ID,
os_path.join(self.medium_path, 'medium.io.c.csv'),
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertEqual(df_base.shape, (0,0))
self.assertEqual(df_user.shape[0], df.shape[0])
self.assertEqual(df_user.shape[1]-1, df.shape[1])
# Select by id
df_base, df_user = load_compounds(
'm9',
os_path.join(self.medium_path, 'medium.io.c.csv'),
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertEqual(df_base.shape[0], 2)
self.assertEqual(df_base.shape[1]-1, len(self.__MEDIUM_HEADER))
self.assertEqual(df_user.shape[0], df.shape[0])
self.assertEqual(df_user.shape[1]-1, df.shape[1])
# Challenge
df_base, df_user = load_compounds(
'',
os_path.join(self.medium_path, 'medium.io.c.csv'),
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertFalse(is_df_medium_defined(df_base))
df_base, df_user = load_compounds(
np.nan,
os_path.join(self.medium_path, 'medium.io.c.csv'),
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertFalse(is_df_medium_defined(df_base))
df_base, df_user = load_compounds(
self.__MEDIUM_DEFAULT_ID,
self.temp_d,
os_path.join(self.medium_path, 'medium.io.c.csv')
)
self.assertFalse(is_df_medium_defined(df_base))
self.assertTrue(is_df_medium_defined(df_user))
df_base, df_user = load_compounds(
self.__MEDIUM_DEFAULT_ID,
os_path.join(self.medium_path, 'medium.io.c.csv'),
self.temp_d
)
self.assertFalse(is_df_medium_defined(df_base))
self.assertFalse(is_df_medium_defined(df_user))
def test_create_rp_compound(self):
df = pd.read_csv(os_path.join(self.medium_path, 'medium.annotation.a.csv'))
df = create_rp_compound(
df=df,
logger=self.logger
)
# Return type.
self.assertTrue(isinstance(df, pd.DataFrame))
# Values.
self.assertTrue(isinstance(df.loc[0, 'rp_compound'], rpCompound))
self.assertTrue(isinstance(df.loc[1, 'rp_compound'], rpCompound))
self.assertTrue(isinstance(df.loc[2, 'rp_compound'], rpCompound))
self.assertEqual(
df.loc[0, 'rp_compound'].get_id(),
df.loc[2, 'rp_compound'].get_id()
)
self.assertTrue( | pd.isna(df.loc[3, 'rp_compound']) | pandas.isna |
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = ["<EMAIL>"]
import os
import multiprocessing as mp
from fnmatch import fnmatch
from functools import partial
import numpy as np
import pandas as pd
import fatpack
from pCrunch.io import OpenFASTAscii, OpenFASTBinary, OpenFASTOutput
class LoadsAnalysis:
"""Implementation of `mlife` in python."""
def __init__(self, outputs, **kwargs):
"""
Creates an instance of `pyLife`.
Parameters
----------
outputs : list
List of OpenFAST output filepaths or dicts of OpenFAST outputs.
directory : str (optional)
If directory is passed, list of files will be treated as relative
and appended to the directory.
fatigue_channels : dict (optional)
Dictionary with format:
'channel': 'fatigue slope'
magnitude_channels : dict (optional)
Additional channels as vector magnitude of other channels.
Format: 'new-chan': ['chan1', 'chan2', 'chan3']
trim_data : tuple
Trim processed outputs to desired times.
Format: (min, max)
"""
self.outputs = outputs
self.parse_settings(**kwargs)
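    # Rough usage sketch (file paths and channel names here are illustrative, not from the codebase):
    #   la = LoadsAnalysis(["run01.outb", "run02.outb"],
    #                      fatigue_channels={"RootMyb1": 10},
    #                      magnitude_channels={"RootMc1": ["RootMxb1", "RootMyb1", "RootMzb1"]})
    #   la.process_outputs(cores=2)
    #   # results end up on la._summary_stats, la._extremes and la._dels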
def parse_settings(self, **kwargs):
"""Parses settings from input kwargs."""
self._directory = kwargs.get("directory", None)
self._ec = kwargs.get("extreme_channels", True)
self._mc = kwargs.get("magnitude_channels", {})
self._fc = kwargs.get("fatigue_channels", {})
self._td = kwargs.get("trim_data", ())
def process_outputs(self, cores=1, **kwargs):
"""
Processes all outputs for summary statistics and configured damage
equivalent loads.
"""
if cores > 1:
stats, extrs, dels = self._process_parallel(cores, **kwargs)
else:
stats, extrs, dels = self._process_serial(**kwargs)
summary_stats, extremes, DELs = self.post_process(
stats, extrs, dels, **kwargs
)
self._summary_stats = summary_stats
self._extremes = extremes
self._dels = DELs
def _process_serial(self, **kwargs):
"""Process outputs in serieal in serial."""
summary_stats = {}
extremes = {}
DELs = {}
for output in self.outputs:
filename, stats, extrs, dels = self._process_output(
output, **kwargs
)
summary_stats[filename] = stats
extremes[filename] = extrs
DELs[filename] = dels
return summary_stats, extremes, DELs
def _process_parallel(self, cores, **kwargs):
"""
Process outputs in parallel.
Parameters
----------
        cores : int
            Number of worker processes to use.
"""
summary_stats = {}
extremes = {}
DELs = {}
pool = mp.Pool(cores)
returned = pool.map(
partial(self._process_output, **kwargs), self.outputs
)
pool.close()
pool.join()
for filename, stats, extrs, dels in returned:
summary_stats[filename] = stats
extremes[filename] = extrs
DELs[filename] = dels
return summary_stats, extremes, DELs
def _process_output(self, f, **kwargs):
"""
Process OpenFAST output `f`.
Parameters
----------
f : str | OpenFASTOutput
Path to output or direct output in dict format.
"""
if isinstance(f, str):
output = self.read_file(f)
else:
output = f
if self._td:
output.trim_data(*self._td)
stats = self.get_summary_stats(output, **kwargs)
if self._ec is True:
extremes = output.extremes(output.channels)
elif isinstance(self._ec, list):
extremes = output.extremes(self._ec)
dels = self.get_DELs(output, **kwargs)
return output.filename, stats, extremes, dels
def get_summary_stats(self, output, **kwargs):
"""
Appends summary statistics to `self._summary_statistics` for each file.
Parameters
----------
output : OpenFASTOutput
"""
fstats = {}
for channel in output.channels:
if channel in ["time", "Time"]:
continue
fstats[channel] = {
"min": float(min(output[channel])),
"max": float(max(output[channel])),
"std": float(np.std(output[channel])),
"mean": float(np.mean(output[channel])),
"abs": float(max(np.abs(output[channel]))),
"integrated": float(np.trapz(output["Time"], output[channel])),
}
return fstats
def get_extreme_events(self, output, channels, **kwargs):
"""
Returns extreme events of `output`.
Parameters
----------
output : OpenFASTOutput
channels : list
"""
return output.extremes(channels)
@staticmethod
def post_process(stats, extremes, dels, **kwargs):
"""Post processes internal data to produce DataFrame outputs."""
# Summary statistics
ss = | pd.DataFrame.from_dict(stats, orient="index") | pandas.DataFrame.from_dict |
import base64
from flask import Blueprint, session, send_from_directory, url_for, flash
from flask import redirect
from flask import render_template
from flask import request
import os
from werkzeug.utils import secure_filename
from shutil import copyfile
from .classes.preProcessClass import PreProcess
from .classes.featureReductionClass import FeatureReduction
import io
import pandas as pd
from flaskr.auth import login_required, UserData
from flask import g
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from werkzeug.exceptions import abort
bp = Blueprint("preprocess", __name__, url_prefix="/pre")
ALLOWED_EXTENSIONS = set(['pkl', 'csv', 'plk'])
from pathlib import Path
ROOT_PATH = Path.cwd()
USER_PATH = ROOT_PATH / "flaskr" / "upload" / "users"
UPLOAD_FOLDER = ROOT_PATH / "flaskr" / "upload"
ANNOTATION_TBL = UPLOAD_FOLDER / "AnnotationTbls"
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
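# e.g. allowed_file('expr_matrix.csv') -> True, allowed_file('expr_matrix.txt') -> False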
@bp.route("/")
@login_required
def index():
annotation_list = []
path = USER_PATH / str(g.user["id"])
list_names = [f for f in os.listdir(path) if os.path.isfile((path / f))]
annotation_db = UserData.get_annotation_file(g.user["id"])
for f in annotation_db:
annotation_list.append([f['file_name'], f['path']])
if len(list_names) == 0:
flash("Error: You don't have uploaded file.")
return render_template("preprocess/step-1.html", available_list=list_names, annotation_list=annotation_list)
# step 2 | Session > Database
@bp.route("/step-2", methods=['POST'])
@login_required
def view_merge_df():
user_id = g.user["id"]
annotation_table = request.form.get("anno_tbl")
col_sel_method = request.form.get("column_selection")
file_name = request.form.get("available_files")
if annotation_table and col_sel_method and file_name:
file_path = USER_PATH / str(user_id) / file_name
# Delete query if file already pre-processed
UserData.delete_preprocess_file(user_id, file_name)
if annotation_table == 'other':
file = request.files['chooseFile']
if file and allowed_file(file.filename):
annotation_table = secure_filename(file.filename)
path_csv = ANNOTATION_TBL / "other" / (str(user_id) + "_" + annotation_table)
# Delete same file uploaded
result = UserData.get_user_file_by_file_name(user_id, annotation_table)
annotation_df = pd.read_csv(file, usecols=[0, 1], header=0)
col = annotation_df.columns
if "ID" in col and "Gene Symbol" in col and len(col) == 2:
annotation_df.to_csv(path_csv, index=False)
else:
flash("Wrong Format: Gene Symbol and/or ID column not found in annotation table.")
return redirect('/pre')
else:
return abort(403)
df = PreProcess.mergeDF(file_path, path_csv)
if result is None:
view_path = "/AnnotationTbls/other/" + str(user_id) + "_" + annotation_table
UserData.add_file(annotation_table, annotation_table.split('.')[1], view_path, user_id, 1, 0)
else:
# load df
annotation_table_path = UPLOAD_FOLDER.as_posix() + annotation_table
df = PreProcess.mergeDF(file_path, Path(annotation_table_path))
if df is None:
flash("Couldn't merge dataset with annotation table")
return redirect('/pre')
y = PreProcess.getDF(file_path)
if 'class' not in y.columns:
flash("Wrong Format: class column not found.")
return redirect('/pre')
y = y['class']
data = PreProcess.get_df_details(df, y)
session[file_name] = data
df = df.dropna(axis=0, subset=['Gene Symbol'])
df = PreProcess.probe2Symbol(df, int(col_sel_method))
merge_name = "merge_" + file_name
merge_path = USER_PATH / str(user_id) / "tmp" / merge_name
merge_path_str = merge_path.as_posix()
PreProcess.saveDF(df, merge_path_str)
# save data to the Database
UserData.add_preprocess(user_id, file_name, file_path.as_posix(), annotation_table, col_sel_method,
merge_path_str)
pre_process_id = UserData.get_user_preprocess(user_id, file_name)['id']
if len(df.columns) > 100:
df_view = df.iloc[:, 0:100].head(15)
else:
df_view = df.head(15)
return render_template("preprocess/step-2.html", tables=[df_view.to_html(classes='data')], details=data,
pre_process_id=pre_process_id, file_name=merge_name)
return redirect('/pre')
# step 3
@bp.route("/step-3", methods=['GET'])
@login_required
def scaling_imputation():
pre_process_id = request.args.get("id")
pre_process = UserData.get_preprocess_from_id(pre_process_id)
if pre_process is None:
return redirect('/pre')
data = session.get(pre_process['file_name'])
if data is not None:
return render_template("preprocess/step-3.html", details=data, pre_process_id=pre_process_id)
return redirect('/pre')
# normalization and null remove
@bp.route("/step-4", methods=['POST'])
@login_required
def norm():
norm_method = request.form.get("norm_mthd")
null_rmv = request.form.get("null_rmv")
pre_process_id = request.form.get("id")
if norm_method and null_rmv and pre_process_id:
pre_process = UserData.get_preprocess_from_id(pre_process_id)
if pre_process is None:
return redirect('/pre')
user_id = pre_process['user_id']
UserData.update_preprocess(user_id, pre_process['file_name'], 'scaling', norm_method)
UserData.update_preprocess(user_id, pre_process['file_name'], 'imputation', null_rmv)
if pre_process['merge_df_path'] == '':
merge_df_path = Path(pre_process['file_path'])
df = PreProcess.getDF(merge_df_path)
df = df.drop(['class'], axis=1)
df = df.T
df = df.reset_index()
else:
merge_df_path = Path(pre_process['merge_df_path'])
df = PreProcess.getDF(merge_df_path)
df = PreProcess.step3(df, norm_method, null_rmv) # symbol_df
avg_symbol_name = "avg_symbol_" + pre_process['file_name']
avg_symbol_df_path = USER_PATH / str(g.user["id"]) / "tmp" / avg_symbol_name
avg_symbol_df_path_str = avg_symbol_df_path.as_posix()
PreProcess.saveDF(df, avg_symbol_df_path_str)
UserData.update_preprocess(user_id, pre_process['file_name'], 'avg_symbol_df_path', avg_symbol_df_path_str)
data = session[pre_process['file_name']]
data = PreProcess.add_details_json(data, df, "r1")
session[pre_process['file_name']] = data
if len(df.columns) > 100:
df_view = df.iloc[:, 0:100].head(15)
else:
df_view = df.head(15)
return render_template("preprocess/step-4.html", tablesstep4=[df_view.to_html(classes='data')],
details=data, pre_process_id=pre_process_id, file_name=avg_symbol_name)
return redirect('/pre')
# skip method Step 1 to Step 3
@bp.route("/skip-step-1", methods=['GET'])
@login_required
def skip_df_mapping():
user_id = g.user['id']
file_name = request.args.get("selected_file")
if not file_name:
return redirect('./pre')
file_path = USER_PATH / str(user_id) / file_name
UserData.delete_preprocess_file(user_id, file_name)
UserData.add_preprocess(user_id, file_name, file_path.as_posix(), '', '', '')
pre_process_id = UserData.get_user_preprocess(user_id, file_name)['id']
df = PreProcess.getDF(file_path)
data = PreProcess.get_df_details(df, None)
session[file_name] = data
return redirect(url_for('preprocess.scaling_imputation') + "?id=" + str(pre_process_id))
# step 5
@bp.route("/step-5", methods=['GET'])
@login_required
def feature_reduction():
pre_process_id = request.args.get("id")
pre_process = UserData.get_preprocess_from_id(pre_process_id)
if pre_process is None:
return redirect('/pre')
if pre_process['avg_symbol_df_path']:
avg_symbol_df_path = Path(pre_process['avg_symbol_df_path'])
file_path = Path(pre_process['file_path'])
p_fold_df = PreProcess.get_pvalue_fold_df(avg_symbol_df_path, file_path)
else:
# From step1
file_path = Path(pre_process['file_path'])
p_fold_df = PreProcess.get_pvalue_fold_df(file_path)
p_fold_df_path = USER_PATH / str(g.user["id"]) / 'tmp' / ('_p_fold_' + pre_process['file_name'])
PreProcess.saveDF(p_fold_df, p_fold_df_path)
pvalues_max = p_fold_df['pValues'].max() * 0.1
fold_max = p_fold_df['fold'].max() * 0.2
pvalues = np.linspace(0.001, 0.01, 19)
pvalues = np.around(pvalues, decimals=4)
folds = np.linspace(0.001, fold_max, 40)
folds = np.around(folds, decimals=4)
data_array = [pvalues, folds]
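    # pvalues spans candidate cut-offs 0.001-0.01 and folds spans cut-offs up to 20% of the
    # maximum fold change; the volcano figure below plots fold change against p-value so the
    # user can pick a threshold pair visually.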
volcano_hash = get_volcano_fig(p_fold_df['fold'], p_fold_df['pValues'])
return render_template("preprocess/step-5.html", data_array=data_array, volcano_hash=volcano_hash,
pre_process_id=pre_process_id)
# step 6
@bp.route("/step-6/", methods=['POST'])
@login_required
def get_reduce_features_from_pvalues():
fold = request.form["fold-range"]
pvalue = request.form["p-value"]
pre_process_id = request.form["id"]
pre_process = UserData.get_preprocess_from_id(pre_process_id)
p_fold_df_path = USER_PATH / str(g.user["id"]) / 'tmp' / ('_p_fold_' + pre_process['file_name'])
p_fold_df = PreProcess.getDF(p_fold_df_path)
if pre_process['avg_symbol_df_path']:
df = PreProcess.get_filtered_df_pvalue(p_fold_df, pre_process['avg_symbol_df_path'], float(pvalue), float(fold))
else:
# From step1 skip
df = PreProcess.get_filtered_df_pvalue(p_fold_df, pre_process['file_path'], float(pvalue), float(fold), 0)
fr_df_path = USER_PATH / str(g.user["id"]) / 'tmp' / ('fr_' + pre_process['file_name'])
PreProcess.saveDF(df, fr_df_path)
length = len(df.columns)
if length <= 150:
split_array = np.array([length])
elif length < 350:
split_array = np.arange(150, int(length / 10) * 10, 10)
else:
split_array = np.linspace(150, 350, 21)
split_array = split_array.astype(int)
# Get classification Results
df_y = PreProcess.getDF(Path(pre_process['file_path']))
y = df_y['class']
y = pd.to_numeric(y)
classification_result_df = FeatureReduction.get_classification_results(df, y)
cls_id, cls_name = FeatureReduction.get_best_cls(classification_result_df)
classification_result_df = classification_result_df.drop(['Training'], axis=1)
classification_result_df = classification_result_df.sort_values(by=['Testing'], ascending=False)
classification_result_df = classification_result_df.set_index(['Classifiers'])
classification_result_df.index.name = None
classification_result_df = classification_result_df.rename(columns={"Testing": "Testing Accuracy /%"})
fs_fig_hash = get_feature_selection_fig(df, df_y, length)
UserData.update_preprocess(pre_process['user_id'], pre_process['file_name'], 'reduce_df_path',
fr_df_path.as_posix())
UserData.update_preprocess(pre_process['user_id'], pre_process['file_name'], 'classifiers', cls_id)
return render_template("preprocess/step-6.html", split_array=split_array, fs_fig_hash=fs_fig_hash,
tables=[classification_result_df.to_html(classes='data')], cls_names=cls_name,
pre_process_id=pre_process_id)
@bp.route("/fr/pf/", methods=['GET'])
@login_required
def get_feature_count_pval():
pvalue = request.args.get("pvalue")
foldChange = request.args.get("foldChange")
pre_process_id = request.args.get("id")
pre_process = UserData.get_preprocess_from_id(pre_process_id)
path = USER_PATH / str(g.user["id"]) / 'tmp' / ('_p_fold_' + pre_process['file_name'])
p_fold_df = PreProcess.getDF(path)
count = PreProcess.get_filtered_df_count_pvalue(p_fold_df, float(pvalue), float(foldChange))
return str(count)
@bp.route("/fr/save/", methods=['POST'])
@login_required
def save_reduced_df():
features_count = request.form['features_count']
pre_process_id = request.form['id']
pre_process = UserData.get_preprocess_from_id(pre_process_id)
df = PreProcess.getDF(Path(pre_process['reduce_df_path']))
df_y = PreProcess.getDF(Path(pre_process['file_path']))
y = df_y['class']
y = | pd.to_numeric(y) | pandas.to_numeric |
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
import pyratings as rtg
from tests import conftest
# --- input: single rating/warf
@pytest.mark.parametrize(
["rating_provider", "rating", "score"],
list(
pd.concat(
[
conftest.rtg_df_long,
conftest.scores_df_long["rtg_score"],
],
axis=1,
).to_records(index=False)
),
)
def test_get_scores_from_single_rating_longterm(rating_provider, rating, score):
"""Tests if function can handle single string objects."""
act = rtg.get_scores_from_ratings(
ratings=rating, rating_provider=rating_provider, tenor="long-term"
)
assert act == score
@pytest.mark.parametrize(
["rating_provider", "rating", "score"],
list(
pd.concat(
[
conftest.rtg_df_long_st,
conftest.scores_df_long_st["rtg_score"],
],
axis=1,
).to_records(index=False)
),
)
def test_get_scores_from_single_rating_shortterm(rating_provider, rating, score):
"""Tests if function can handle single string objects."""
act = rtg.get_scores_from_ratings(
ratings=rating, rating_provider=rating_provider, tenor="short-term"
)
assert act == score
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_scores_from_single_rating_invalid_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(AssertionError) as err:
rtg.get_scores_from_ratings(ratings="AA", rating_provider="foo", tenor=tenor)
assert str(err.value) == conftest.ERR_MSG
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_scores_with_invalid_single_rating(tenor):
"""Tests if function returns NaN for invalid inputs."""
act = rtg.get_scores_from_ratings(
ratings="foo", rating_provider="Fitch", tenor=tenor
)
assert pd.isna(act)
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_scores_with_single_rating_and_no_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(ValueError) as err:
rtg.get_scores_from_ratings(ratings="BBB", tenor=tenor)
assert str(err.value) == "'rating_provider' must not be None."
@pytest.mark.parametrize(
"warf, score",
[
(1, 1),
(6, 2),
(54.9999, 4),
(55, 5),
(55.00001, 5),
(400, 9),
(10_000, 22),
],
)
def test_get_scores_from_single_warf(warf, score):
"""Tests if function can correctly handle individual warf (float)."""
act = rtg.get_scores_from_warf(warf=warf)
assert act == score
@pytest.mark.parametrize("warf", [np.nan, -5, 20000.5])
def test_get_scores_from_invalid_single_warf(warf):
"""Tests if function returns NaN for invalid inputs."""
assert pd.isna(rtg.get_scores_from_warf(warf=warf))
# --- input: ratings series
@pytest.mark.parametrize(
["rating_provider", "scores_series", "ratings_series"],
conftest.params_provider_scores_ratings_lt,
)
def test_get_scores_from_ratings_series_longterm(
rating_provider, ratings_series, scores_series
):
"""Tests if function can correctly handle pd.Series objects."""
scores_series.name = f"rtg_score_{rating_provider}"
act = rtg.get_scores_from_ratings(
ratings=ratings_series, rating_provider=rating_provider
)
| assert_series_equal(act, scores_series) | pandas.testing.assert_series_equal |
import numpy as np
import pandas as pd
import yaml
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.utils import shuffle
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC, LinearSVC
with open("config.yaml", 'r') as stream:
    cfg = yaml.safe_load(stream)
class DataPreprocessing:
def __init__(self, train_file, test_file):
self.train_data = | pd.read_csv(train_file) | pandas.read_csv |
import numpy as np
import pandas as pd
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
results_df = pd.read_csv('./csvs/results_from_mongo.csv')
results_df.drop(columns=['Unnamed: 0'],inplace=True)
results_df
# only keep 2021 for the api
api_df = results_df[results_df['Season']==2021]
api_df.reset_index(drop=True, inplace=True)
api_df.to_csv(f'2021_races_drivers.csv')
api_df.to_csv(f'./flask-api/2021_races_drivers.csv')
# Feature Engineering - Driver Experience
print('Feature Engineering - Driver Experience')
results_df['DriverExperience'] = 0
drivers = results_df['Driver'].unique()
for driver in drivers:
df_driver = pd.DataFrame(results_df[results_df['Driver']==driver]).tail(60) # Arbitrary number, just look at the last x races
df_driver.loc[:,'DriverExperience'] = 1
results_df.loc[results_df['Driver']==driver, "DriverExperience"] = df_driver['DriverExperience'].cumsum()
results_df['DriverExperience'].fillna(value=0,inplace=True)
print('Feature Engineering - Constructor Experience')
# Feature Engineering - Constructor Experience
results_df['ConstructorExperience'] = 0
constructors = results_df['Constructor'].unique()
for constructor in constructors:
df_constructor = | pd.DataFrame(results_df[results_df['Constructor']==constructor]) | pandas.DataFrame |
from process.edit_data import *
from process.parse_data import *
from resources.const import *
from utils.scraping import *
import ast
import pandas as pd
if __name__ == "__main__":
df_tweets = pd.read_csv(path_to_raw, sep=";")
# extract URLs to scrape
df_tweets["urls"] = df_tweets.raw_tweets.apply(url_extractor)
# scrape and extract bodies of articles
df_tweets["bodies"] = df_tweets.urls.apply(extract_body)
# parse bodies
df_tweets["parsed_data"] = df_tweets["bodies"].apply(parse_metadata)
df_tweets[list_cols_parsed].to_csv(path_to_parsed, index=False, sep=";")
# extract information from parsed bodies
df_tweets["edited_data"] = df_tweets["parsed_data"].apply(edit_metadata)
df_tweets[list_cols_edited].to_csv(path_to_edited, index=False, sep=";")
# format edited_data into staging_data data
    edited_data = df_tweets["edited_data"].apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else x)  # parse stringified dicts; dict(<str>) would raise
edited_data = pd.DataFrame(edited_data.to_list())
staging_data = | pd.concat([df_tweets, edited_data], axis=1) | pandas.concat |
import json
import pickle
import joblib
import pandas as pd
from flask import Flask, jsonify, request
from peewee import (
SqliteDatabase, PostgresqlDatabase, Model, IntegerField,
FloatField, TextField, IntegrityError
)
from playhouse.shortcuts import model_to_dict
########################################
# Begin database stuff
DB = SqliteDatabase('predictions.db')
class Prediction(Model):
observation_id = IntegerField(unique=True)
observation = TextField()
proba = FloatField()
true_class = IntegerField(null=True)
class Meta:
database = DB
DB.create_tables([Prediction], safe=True)
# End database stuff
########################################
########################################
# Unpickle the previously-trained model
with open('columns.json') as fh:
columns = json.load(fh)
with open('pipeline.pickle', 'rb') as fh:
pipeline = joblib.load(fh)
with open('dtypes.pickle', 'rb') as fh:
dtypes = pickle.load(fh)
# End model un-pickling
########################################
########################################
# Input validation functions
def check_request(request):
"""
Validates that our request is well formatted
Returns:
- assertion value: True if request is ok, False otherwise
- error message: empty if request is ok, False otherwise
"""
if "id" not in request:
error = "Field `id` missing from request: {}".format(request)
return False, error
if "observation" not in request:
error = "Field `observation` missing from request: {}".format(request)
return False, error
return True, ""
def check_valid_column(observation):
"""
Validates that our observation only has valid columns
Returns:
- assertion value: True if all provided columns are valid, False otherwise
- error message: empty if all provided columns are valid, False otherwise
"""
valid_columns = {
"Department Name",
"InterventionLocationName",
"InterventionReasonCode",
"ReportingOfficerIdentificationID",
"ResidentIndicator",
"SearchAuthorizationCode",
"StatuteReason",
"SubjectAge",
"SubjectEthnicityCode",
"SubjectRaceCode",
"SubjectSexCode",
"TownResidentIndicator"
}
keys = set(observation.keys())
if len(valid_columns - keys) > 0:
missing = valid_columns - keys
error = "Missing columns: {}".format(missing)
return False, error
if len(keys - valid_columns) > 0:
extra = keys - valid_columns
error = "Unrecognized columns provided: {}".format(extra)
return False, error
return True, ""
def check_categorical_values(observation):
"""
Validates that all categorical fields are in the observation and values are valid
Returns:
- assertion value: True if all provided categorical columns contain valid values,
False otherwise
- error message: empty if all provided columns are valid, False otherwise
"""
valid_category_map = {
"InterventionReasonCode": ["V", "E", "I"],
"SubjectRaceCode": ["W", "B", "A", "I"],
"SubjectSexCode": ["M", "F"],
"SubjectEthnicityCode": ["H", "M", "N"],
"SearchAuthorizationCode": ["O", "I", "C", "N"],
"ResidentIndicator": [True, False],
"TownResidentIndicator": [True, False],
"StatuteReason": [
'Stop Sign', 'Other', 'Speed Related', 'Cell Phone', 'Traffic Control Signal', 'Defective Lights',
'Moving Violation', 'Registration', 'Display of Plates', 'Equipment Violation', 'Window Tint',
'Suspended License', 'Seatbelt', 'Other/Error', 'STC Violation', 'Administrative Offense', 'Unlicensed Operation']
}
for key, valid_categories in valid_category_map.items():
if key in observation:
value = observation[key]
if value not in valid_categories:
error = "Invalid value provided for {}: {}. Allowed values are: {}".format(
key, value, ",".join(["'{}'".format(v) for v in valid_categories]))
return False, error
else:
            error = "Categorical field {} missing".format(key)
return False, error
return True, ""
def check_age(observation):
"""
Validates that observation contains valid age value
Returns:
- assertion value: True if age is valid, False otherwise
- error message: empty if age is valid, False otherwise
"""
age = observation.get("SubjectAge")
if not age:
error = "Field `SubjectAge` missing"
return False, error
if not isinstance(age, int):
error = "Field `SubjectAge` is not an integer"
return False, error
if age < 0 or age > 100:
error = "Field `SubjectAge` is not between 0 and 100"
return False, error
return True, ""
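# Illustrative only (not part of the original service): a sample observation that
# satisfies check_valid_column, check_categorical_values and check_age above.
# The free-text values ("Hartford", "1000002713") are made up; the coded fields use
# values taken from the valid_category_map defined above.
EXAMPLE_OBSERVATION = {
    "Department Name": "Hartford",
    "InterventionLocationName": "Hartford",
    "InterventionReasonCode": "V",
    "ReportingOfficerIdentificationID": "1000002713",
    "ResidentIndicator": True,
    "SearchAuthorizationCode": "N",
    "StatuteReason": "Speed Related",
    "SubjectAge": 35,
    "SubjectEthnicityCode": "N",
    "SubjectRaceCode": "W",
    "SubjectSexCode": "M",
    "TownResidentIndicator": False,
}
# check_valid_column(EXAMPLE_OBSERVATION), check_categorical_values(EXAMPLE_OBSERVATION)
# and check_age(EXAMPLE_OBSERVATION) all return (True, "").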
# End input validation functions
########################################
########################################
# Begin webserver stuff
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def predict():
obs_dict = request.get_json()
request_ok, error = check_request(obs_dict)
if not request_ok:
response = {'error': error}
return jsonify(response)
_id = obs_dict['id']
observation = obs_dict['observation']
columns_ok, error = check_valid_column(observation)
if not columns_ok:
response = {'error': error}
return jsonify(response)
categories_ok, error = check_categorical_values(observation)
if not categories_ok:
response = {'error': error}
return jsonify(response)
age_ok, error = check_age(observation)
if not age_ok:
response = {'error': error}
return jsonify(response)
obs = | pd.DataFrame([observation], columns=columns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
#librerias
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
#%% plot style
plt.style.use('ggplot')
# - - - - - MAIN - - - - -
def main():
    # function to generate the color dict (comment this out if the dictionary has already been generated)
    # generarDictColores()
    # read the totalDeColores csv
data = | pd.read_csv('resultados/totalDeColores.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
Compared with version 1.6.4, the update is in the correlation coefficient calculation.
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
def improved_PCC(signal_in):
output_corr = pd.DataFrame()
for i in range(44):
row_pcc_notremovemean = []
for j in range(44):
sig_1 = signal_in.iloc[i, :]
sig_2 = signal_in.iloc[j, :]
pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2 * sig_2)))
row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean)
output_corr = output_corr.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True)
output_corr = output_corr.iloc[22:44, 0:22]
return output_corr
###############################################################################
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# Mac
# df_EFR=pd.read_pickle('/Users/bruce/Documents/uOttawa/Master‘s Thesis/4.Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# without window function
df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function
# implement the window function
df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
# this part was replaced by upper part based on what I need to do
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 9606
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
# zero padding
# for df_EFR
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg_win_85
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# concatenate AENU
temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606]
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 699]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the Amplitude Spectrum
# create a new dataframe with zero-padding amplitude spectrum
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
df_as_85_no0= pd.DataFrame()
df_as_85= | pd.DataFrame() | pandas.DataFrame |
#https://github.com/numpy/numpy/issues/2871
# does not work on numpy 1.12.1 because of a bug
import numpy as np
arr = [[0, 0], [1, 1], [1, 0], [1, 1], [0, 1], [0, 0]]  # example input, mirrors the pandas snippet below
uniq_arr = np.unique(arr, axis=0)
print(uniq_arr)
#https://www.reddit.com/r/learnpython/comments/3v9y8u/how_can_i_find_unique_elements_along_one_axis_of/
import pandas as pd
arr = [[0, 0], [1, 1], [1, 0], [1, 1], [0, 1], [0, 0]]
df = | pd.DataFrame(arr) | pandas.DataFrame |
"""
A selection of methods for a Pandas DataFrame. Please refer to the Pandas
documentation for a more descriptive guide to the methods:
https://pandas.pydata.org/pandas-docs/stable/dsintro.html
"""
import numpy as np
import pandas as pd
from CHECLabPy.core.io import DL1Reader
PATH = "/Users/Jason/Software/CHECLabPy/refdata/Run17473_dl1.h5"
def get_numpy():
"""
Pandas dataframe columns are essentially numpy arrays.
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
charge_numpy_array = df['charge'].values
print(type(charge_numpy_array))
def get_numpy_mean():
"""
    Pandas dataframe columns are essentially numpy arrays, and therefore can
be operated on by any of the numpy methods.
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
charge_mean = np.mean(df['charge'])
print(charge_mean)
def get_table_mean():
"""
Pandas also has its own methods for obtaining many statistical results,
which can be applied to the entire table at once efficiently.
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
mean_series = df.mean()
print(mean_series)
def select_subset():
"""
A subset of the DataFrame can be selected to produce a new DataFrame
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
df_tm4 = df.loc[df['tm'] == 4]
print(df_tm4)
def get_mean_per_tm():
"""
The Pandas groupby method can be used to calculate statistics per group
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
df_mean = df.groupby('tm').mean().reset_index()
# reset_index() restores the tm column,
# otherwise it will remain as the index
print(df_mean)
def get_multiple_statistics():
"""
The `aggregate` method allows multiple operations to be performed at once
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
df_stats = df[['tm', 'charge']].groupby('tm').agg(['mean', 'min', 'max'])
print(df_stats)
print(df_stats['charge']['mean'])
def apply_different_statistic_to_different_column():
"""
Passing a dict to `aggregate` allows you to specify a different operation
depending on the column
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
f = dict(pixel='first', charge='std')
df_stats = df[['tm', 'pixel', 'charge']].groupby('tm').agg(f)
print(df_stats)
def apply_custom_function():
"""
Any function can be passed to the `apply` method, including numpy functions
    You will notice that the numpy std method produces a different result to
    the pandas result. That's because np.std defaults to the population
    standard deviation (ddof=0), whereas pandas applies Bessel's correction
    (ddof=1) by default to correct for the bias in the estimation of the
    population variance from a sample.
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
df_pd_std = df[['tm', 'charge']].groupby('tm').std()['charge']
df_np_std = df[['tm', 'charge']].groupby('tm').apply(np.std)['charge']
df_comparison = pd.DataFrame(dict(pd=df_pd_std, np=df_np_std))
print(df_comparison)
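# A small illustrative check (not in the original file) of the ddof difference
# described in the docstring above; the numbers are arbitrary.
def ddof_example():
    values = pd.Series([1.0, 2.0, 3.0, 4.0])
    population_std = np.std(values.values)  # ddof=0 -> ~1.118
    sample_std = values.std()               # ddof=1 (Bessel's correction) -> ~1.291
    # np.std(values.values, ddof=1) reproduces the pandas result
    print(population_std, sample_std)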
def apply_custom_function_agg():
"""
One can also apply a custom function inside the agg approach
"""
r = DL1Reader(PATH)
df = r.load_entire_table()
df['tm'] = df['pixel'] // 64
f_camera_first_half = lambda g: df.loc[g.index].iloc[0]['tm'] < 32/2
f = dict(pixel=f_camera_first_half, charge='std')
df_stats = df[['tm', 'pixel', 'charge']].groupby('tm').agg(f)
df_stats = df_stats.rename(columns={'pixel': 'camera_first_half'})
print(df_stats)
def get_running_mean():
"""
For very large files it may not be possible to utilise pandas statistical
methods which assume the entire dataset is loaded into memory. This is the
approach I often use to calculate a running statistic (including
charge resolution)
One should be careful with using this approach to calculate the standard
deviation as it which can lead to numerical instability and
arithmetic overflow when dealing with large values.
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
class RunningStats:
def __init__(self):
self._df_list = []
self._df = | pd.DataFrame() | pandas.DataFrame |
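# Standalone sketch (not from the original example file) of Welford's online
# algorithm, the approach referenced by the Wikipedia link in the docstring above;
# it keeps a numerically stable running mean/variance without storing the data.
# The class name OnlineStats is illustrative.
class OnlineStats:
    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.m2 = 0.0  # running sum of squared deviations from the current mean

    def update(self, x):
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (x - self.mean)

    @property
    def variance(self):
        # Bessel-corrected sample variance; undefined for fewer than two samples
        return self.m2 / (self.n - 1) if self.n > 1 else float('nan')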
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 09:20:13 2021
@author: bw98j
"""
import prose as pgx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import numpy as np
import itertools
import glob
import os
import random
from tqdm import tqdm
import scipy.stats
import gtfparse
from pylab import *
import collections
from sklearn.preprocessing import StandardScaler
import pickle
from sklearn.decomposition import PCA
import umap
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from operator import itemgetter
#plot parameters
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rc('font',family='arial',size=40)
plt.rc('hatch',linewidth = 2.0)
#%%
conv=pd.read_csv('databases/ensembl_uniprot_conversion.tsv',
sep='\t',
comment='#',
)
conv = conv.rename(columns={'ID':'gene',
'Entry': 'uniprot'})
conv = conv[['gene','uniprot']]
conv = dict(zip(conv.gene,conv.uniprot))
validGenes = conv.keys() #set of genes with associated protein names
tpm = pd.read_csv('klijn_rna_seq/E-MTAB-2706-query-results.tpms.tsv', sep='\t',comment='#')
tpm.columns = [i.split(', ')[-1] for i in tpm.columns]
tpm = tpm.fillna(0)
tpm['protein'] = tpm.apply(lambda x: conv[x['Gene ID']] if x['Gene ID'] in validGenes else np.nan, axis=1)
tpm = tpm.dropna()
hela = tpm[['HeLa','protein']].set_index('protein')
#%%
ibaq = pd.read_csv('klijn_rna_seq/bekker_jensen_2017_ibaq_s3_mmc4.csv', skiprows=2)
ibaq = ibaq[['Protein IDs','Median HeLa iBAQ']]
ibaq['Protein IDs'] = ibaq.apply(lambda x: list(set([i.split('-')[0] for i in x['Protein IDs'].split(';')])),axis=1)
ibaq['matches'] = ibaq.apply(lambda x: len(x['Protein IDs']),axis=1)
ibaq = ibaq[ibaq.matches == 1]
ibaq['Protein IDs'] = ibaq.apply(lambda x: x[0][0], axis=1)
ibaq = ibaq.set_index('Protein IDs').drop(columns=['matches'])
ibaq = ibaq.dropna().drop_duplicates()
ibaq = np.log10(ibaq)
ibaq = ibaq[~ibaq.index.duplicated(keep='first')]
#%% Get HeLa DDA protein lists
with open('interim_files/HeLa_DDA_sample.pkl', 'rb') as handle:
testdata = pickle.load(handle)
#%%
panel_corr = pd.read_csv('interim_files/klijn_panel_spearmanCorr.tsv', sep='\t',index_col=0)
panel_corr_scaled = pd.DataFrame(StandardScaler().fit_transform(panel_corr),
columns = panel_corr.columns,
index = panel_corr.index)
#%% Generate PCA and UMAP projections of panel_corr
pca = PCA(n_components=2)
pca.fit(panel_corr_scaled.T)
df_pca = pd.DataFrame(pca.components_.T, index = panel_corr_scaled.index, columns = ['PC1', 'PC2'])
df_pca.PC1 = df_pca.apply(lambda x: x.PC1*pca.explained_variance_ratio_[0],axis=1)
df_pca.PC2 = df_pca.apply(lambda x: x.PC2*pca.explained_variance_ratio_[1],axis=1)
reducer = umap.UMAP(min_dist=0, random_state=42)
u = reducer.fit_transform(panel_corr_scaled)
df_umap = pd.DataFrame(u, index = panel_corr_scaled.index, columns = ['UMAP-1', 'UMAP-2'])
#%% define KNN for prediction
class knn:
def __init__(self, obs, unobs, df):
df['y'] = df.apply(lambda row: pgx.labeller(row, obs, unobs),axis=1)
subset = df[df.y != -1]
split = lambda x: train_test_split(x.drop(columns=['y']),
x.y,test_size=200,
stratify=x.y)
X_train, X_test, Y_train, Y_test = split(subset)
X_train, Y_train = RandomUnderSampler().fit_resample(X_train, Y_train)
knn = KNeighborsClassifier()
k_range = list(range(5,51))
grid = GridSearchCV(knn, dict(n_neighbors=k_range), cv=5, scoring='accuracy')
grid.fit(X_train, Y_train)
knn=grid.best_estimator_
print('KNN: Optimal k = ', grid.best_params_)
model = knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
Y_scores = knn.predict_proba(X_test).T[1]
Y_self = knn.predict(X_train)
Y_self_scores = knn.predict_proba(X_train).T[1]
self.f1 = round(f1_score(Y_test,Y_pred),4)
self.f1_tr = round(f1_score(Y_train,Y_self),4)
self.fpr, self.tpr, thresholds = roc_curve(Y_test, Y_scores, pos_label = 1)
self.fpr_tr, self.tpr_tr, thresholds_tr = roc_curve(Y_train, Y_self_scores, pos_label = 1)
self.auc = round(auc(self.fpr,self.tpr),3)
self.auc_tr = round(auc(self.fpr_tr,self.tpr_tr),3)
tested_proteins = np.array(df.index.to_list())
probs = knn.predict_proba(df.drop(columns='y'))
score = [i[1] for i in probs]
score_norm = scipy.stats.zscore(score)
self.summary = pd.DataFrame(zip(tested_proteins,
score,
score_norm,
),
columns = ['protein',
'score',
'score_norm',
],
)
from pathlib import Path
if Path('source_data/benchmark_HeLa.tsv').is_file():
data = pd.read_csv('source_data/benchmark_HeLa.tsv',sep='\t')
else:
result = []
for i in range(10):
#random generation of sets for each simulation
sets = dict()
for rep in testdata.keys():
if rep not in sets.keys():
sets[rep] = dict()
import random
obs, unobs = list(testdata[rep]['two peptide']), list(testdata[rep]['no evidence'])
sets[rep]['baseline'] = obs, unobs
sets[rep]['downsamp_1k'] = random.sample(obs,1000), random.sample(unobs,1000)
sets[rep]['downsamp_2k'] = random.sample(obs,2000), random.sample(unobs,2000)
for dropout in [100,200,500,1000]:
drop = random.sample(obs, dropout)
sets[rep]['dropout_'+str(dropout)] = [i for i in obs if (i not in drop)], unobs
mix = random.sample(obs,2000) + random.sample(unobs,2000)
unobs_rand = random.sample(mix, 2000)
sets[rep]['random'] = [i for i in mix if (i not in unobs_rand)], unobs_rand
reps = testdata.keys()
groupings = list(sets[rep].keys())
for rep in reps:
for group in groupings:
print(i, rep, group)
obs, unobs = sets[rep][group]
container ={'knn_umap':knn(obs,unobs,df_umap),
'knn_pca':knn(obs,unobs,df_pca),
'prose':pgx.prose(obs,unobs,panel_corr,)}
for method in container.keys():
q = container[method]
score = q.summary[['protein','score_norm']].set_index('protein')
                    common_prot_tpm = score.index.intersection(hela.index)
                    common_prot_ibaq = score.index.intersection(ibaq.index)
rhotpm = scipy.stats.spearmanr(score.loc[common_prot_tpm],
hela.loc[common_prot_tpm])[0]
rhoibaq = scipy.stats.spearmanr(score.loc[common_prot_ibaq],
ibaq.loc[common_prot_ibaq])[0]
print(rhotpm, rhoibaq)
result.append([rep, group, i, method,
q.f1, q.f1_tr,
q.auc, q.auc_tr,
rhotpm,rhoibaq])
data = pd.DataFrame(result)
data.columns = ['rep', 'group', 'i', 'method',
'f1', 'f1_tr',
'auc', 'auc_tr',
'rho_tpm','rho_ibaq']
data.to_csv('source_data/Fig 1c,d (Benchmarking on HeLa DDA).tsv',sep='\t',index=False)
#%%
nrows = len(data.group.unique())
ncolumns = len(data.method.unique())
auc_df=pd.DataFrame()
auc_tr_df=pd.DataFrame()
f1_df= | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import os
import numpy as np
import seaborn as sns
import joblib
import matplotlib.pyplot as plt
from scipy.stats import mstats, wilcoxon
from inspect import getsource
import scipy.stats as stats
import researchpy as rp
import statsmodels.api as sm
from statsmodels.formula.api import ols
import matplotlib as mpl
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
current_path = os.getcwd()
model_dict = joblib.load(os.path.join(current_path,'constants','model_dict.pkl'))
def tukey(prediction: 'array of ints', model:'array of string'):
"""
    This performs the Tukey HSD test.
    input: array(int(predictions)), array(str(model_names))
    The test indicates whether there are significant differences between the classes.
"""
mc = MultiComparison(prediction,model)
mc_results = mc.tukeyhsd()
print(mc_results)
return mc_results
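# Hedged usage sketch (not in the original module): tiny synthetic arrays purely to
# show the expected input shapes for tukey(); the model names are placeholders.
def _tukey_example():
    preds = np.array([1, 2, 1, 3, 2, 2, 1, 3, 3, 1, 2, 3])
    models = np.array(["cnn", "svm", "rf"] * 4)
    return tukey(preds, models)  # prints and returns the statsmodels pairwise table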
def make2column(data):
"""
This function takes the 'raw' results from the processed prediction class
    and transforms the database into a two-column one: predictions, model_names
"""
dumies = {}
for num, i in enumerate(data.keys()):
dumies[i] = num+1
model = []
prediction = []
for row in range(len(data)):
for key in data.keys():
model.append(key)
prediction.append(data.iloc[row][key]+1)
df = pd.DataFrame()
df['model'] = model
df['prediction'] = prediction
return df
def target_boolean(target,tuk_text):
lines = []
for ind, word in enumerate(tuk_text):
if word == target:
lines.append([ind])
elif word in ['True','False']:
if len(lines)<1:
continue
else:
lines[-1].append(word)
if len(lines) == 2: break
print(lines)
for line in lines:
if line[1] == 'False':
print('Accounts are the SAME')
return False
print('Account are Different')
return True
def makeAnalisys(pfreq, mod, target):
if len(model_dict['reverse'][mod])>2:
final_proportion = {}
pfreq2= make2column(pfreq)
tuk = tukey(pfreq2['prediction'],pfreq2['model'] )
summary = tuk.summary().as_text().split()
print('MODEL: ',mod)
is_target_different = target_boolean(target,summary)
total = sum([sum(pfreq[account]) for account in model_dict['reverse'][mod]])
for account in model_dict['reverse'][mod]:
final_proportion[account] = sum(pfreq[account])/total
if final_proportion[target] == max(final_proportion.values()):
is_target_max = True
else: is_target_max = False
return is_target_different, is_target_max, final_proportion
else:
final_proportion = {}
accounts = [account for account in pfreq.keys()]
_, p = wilcoxon(pfreq[accounts[0]],pfreq[accounts[1]], correction = True)
if p < 0.05:
is_target_different = True
else: is_target_different = False
total = sum([sum(pfreq[account]) for account in model_dict['reverse'][mod]])
for account in model_dict['reverse'][mod]:
final_proportion[account] = sum(pfreq[account])/total
if target not in accounts:
is_target_max = False
else:
if final_proportion[target] == max(final_proportion.values()):
is_target_max = True
else: is_target_max = False
return is_target_different, is_target_max, final_proportion
def purefreq(mod,raw):
final = {}
dummy = {}
counter = {}
for num, account in enumerate(model_dict['reverse'][mod]):
dummy[num] = account
counter[account]= 0
final[account] = []
for ind in range(len(raw)):
for datum in raw.iloc[ind].values:
counter[dummy[datum]] += 1
for account in final.keys():
final[account].append(counter[account])
for account in counter.keys():
counter[account] = 0
return | pd.DataFrame(final) | pandas.DataFrame |
import glob
import os
from sklearn.model_selection import ParameterGrid
from Data import DataCI
from Prioritizer import NNEmbeddings
import pandas as pd
from matplotlib.pylab import plt
import numpy as np
import seaborn as sns
sns.set_theme(style="darkgrid")
def data_clean_analysis(dates, thresholds, thresholds_pairs):
"""
Calculate file/test density for several combinations of data cleaning steps
:param dates:
:param thresholds:
:param thresholds_pairs:
:return:
"""
mpf = []
tpt = []
date = []
thresh = []
thresh_pairs = []
for k, v in dates.items():
for t in thresholds:
for tp in thresholds_pairs:
print(k)
print(t)
print(tp)
print('-----')
commits = pd.read_csv('../pub_data/test_commits_pub.csv', encoding='latin-1', sep='\t')
test_details = pd.read_csv('../pub_data/test_details_pub.csv', sep='\t')
test_status = pd.read_csv('../pub_data/test_histo_pub.csv', sep='\t')
mod_files = pd.read_csv("../pub_data/test_commits_mod_files_pub.csv", sep='\t')
D = DataCI(commits, test_details, test_status, mod_files, start_date=v, threshold=t, threshold_pairs=tp)
modification, transition = D.get_data_info()
mpf.append(modification)
tpt.append(transition)
date.append(k)
thresh.append(t)
thresh_pairs.append(tp)
print(len(date))
print(len(thresh))
print(len(thresh_pairs))
print(len(mpf))
print(len(tpt))
df = pd.DataFrame(list(zip(date, thresh, thresh_pairs, mpf, tpt)),
columns=['date', 'threshold', 'threshold_pairs', 'mpf', 'tpt']
)
df.to_pickle('start_date_analysis1.pkl')
def plot_df(name: str = 'start_date_analysis1.pkl'):
"""
Plot 2D for file/test density
:param name:
:return:
"""
df = pd.read_pickle(name)
print(df)
fig, axarr = plt.subplots(2, 2, sharey=True, sharex=True)
df = df.iloc[::-1]
plt.suptitle('Threshold Start Date Analysis', fontsize=14)
for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):
data = df[df['threshold_pairs'] == row]
if idx == 0 or idx == 1:
column = 0
else:
column = 1
sns.lineplot(x="date", y="mpf", hue="threshold", data=data,
palette='tab10', ax=axarr[idx % 2, column])
sns.lineplot(x="date", y="tpt", hue="threshold", data=data,
palette='tab10', ax=axarr[idx % 2, column], linestyle='--', legend=False)
axarr[idx % 2, column].set_xlabel('Start Date')
axarr[idx % 2, column].set_ylabel('Frequency')
axarr[idx % 2, column].set_title(f'Pairs Threshold - {row}')
axarr[idx % 2, column].legend(loc='center left')
# plot vertical line
# plt.axvline(x=3, linestyle='-.', label='Optimal Value')
# plt.tight_layout()
plt.show()
def surface_plot(name: str = 'start_date_analysis1.pkl'):
"""
3D plot of data cleaning steps
:param name:
:return:
"""
df = pd.read_pickle(name)
# set up a figure twice as wide as it is tall
fig = plt.figure(figsize=plt.figaspect(0.5))
# ===============
# First subplot
# ===============
# set up the axes for the first plot
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.set_title('Modifications per File')
ax.set_xlabel('Date (Months)')
ax.set_ylabel('Threshold Individual')
for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):
data = df[df['threshold_pairs'] == row]
label = 'Threshold pairs ' + str(row)
# Plot the surface.
surf = ax.plot_trisurf(data['date'], data['threshold'], data['mpf'], alpha=0.7,
linewidth=0, antialiased=False, label=label)
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
# ===============
# Second subplot
# ===============
# set up the axes for the second plot
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.set_title('Transitions per Test')
ax.set_xlabel('Date (Months)')
ax.set_ylabel('Threshold Individual')
for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):
data = df[df['threshold_pairs'] == row]
label = 'Threshold pairs ' + str(row)
# Plot the surface.
surf = ax.plot_trisurf(data['date'], data['threshold'], data['tpt'], alpha=0.7,
linewidth=0, antialiased=False, label=label)
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
# cbar = fig.colorbar(surf)
# cbar.locator = LinearLocator(numticks=10)
# cbar.update_ticks()
plt.suptitle('Threshold Start Date Analysis 3D', fontsize=14)
plt.legend()
plt.show()
def plot_single(df_metrics):
"""
APFD plot for single Embedding Neural Network model
:param df_metrics:
:return:
"""
apfd = df_metrics['apfd']
miu = np.round(np.mean(apfd), 2)
sigma = np.round(np.std(apfd), 2)
label = 'regression' + '\n $\mu$ - ' + str(miu) + ' $\sigma$ - ' + str(sigma)
sns.distplot(apfd, kde=True,
bins=int(180 / 5), color=sns.color_palette()[0],
hist_kws={'edgecolor': 'black'},
kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label)
plt.legend(frameon=True, loc='upper left', prop={'size': 20})
plt.xlabel('APFD')
#plt.title('APFD Distribution - 100 revisions ')
plt.show()
def plot_metric(df_metrics, name, batch_size=10, epochs=10):
"""
Parameter tuning plots with several subplots
:param df_metrics:
:param name:
:param batch_size:
:param epochs:
:return:
"""
# One groupplot
fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)
plotname = 'apfd'
subplot_labels = ['(a)', '(b)', '(c)']
for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):
for row, emb_size in enumerate(df_metrics['emb_size'].unique()):
for agidx, (labeltext, task, linestyle) in enumerate(
[('Classification', 'True', '-'), ('Regression', 'False', '-.')]):
rel_df = df_metrics[
(df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &
(df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]
# rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,
# style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])
apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']
miu = np.round(np.mean(apfd), 2)
sigma = np.round(np.std(apfd), 2)
label = labeltext + '\n $\mu$ - ' + str(miu) + ' $\sigma$ - ' + str(sigma)
# sns.displot(data=rel_df, x="apfd", hue='classification', kde=True, ax=axarr[row, column])
sns.distplot(apfd, kde=True,
bins=int(180 / 5), color=sns.color_palette()[agidx],
hist_kws={'edgecolor': 'black'},
kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])
axarr[row, column].xaxis.grid(True, which='major')
axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)
if row == 2:
axarr[row, column].set_xlabel('APFD')
if column == 0:
axarr[row, column].set_ylabel('Density')
axarr[row, column].legend(frameon=True, prop={'size': 6})
# Tweak spacing to prevent clipping of ylabel
fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))
fig.tight_layout()
plt.savefig(name, bbox_inches='tight')
plt.show()
def load_stats_dataframe(files, aggregated_results=None):
"""
Load pickle files and transform to dataframe.
:param files:
:param aggregated_results:
:return:
"""
if os.path.exists(aggregated_results) and all(
[os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):
return pd.read_pickle(aggregated_results)
df = pd.DataFrame()
for f in files:
tmp_dict = pd.read_pickle(f)
tmp_dict['emb_size'] = f.split('_')[2]
tmp_dict['negative_ratio'] = f.split('_')[4]
tmp_dict['batch_size'] = f.split('_')[6]
tmp_dict['epochs'] = f.split('_')[8]
tmp_dict['classification'] = f.split('_')[-1].split('.')[0]
tmp_df = pd.DataFrame.from_dict(tmp_dict)
df = pd.concat([df, tmp_df])
if aggregated_results:
df.to_pickle(aggregated_results)
return df
def parameter_tuning(D, param_grid):
"""
Train model with different combinations of parameters
:param D:
:param param_grid:
:return:
"""
grid = ParameterGrid(param_grid)
for params in grid:
model_file = 'Theshpairs1_Ind_5' + '_emb_' + str(params['embedding_size']) + '_nr_' + str(
params['negative_ratio']) + \
'_batch_' + str(params['batch_size']) + '_epochs_' \
+ str(params['nb_epochs']) + '_classification_' + str(params['classification'])
print(model_file)
# Train Model
Prio = NNEmbeddings(D, embedding_size=params['embedding_size'], negative_ratio=params['negative_ratio'],
nb_epochs=params['nb_epochs'], batch_size=params['batch_size'],
classification=params['classification'], save=True,
model_file='Models/' + model_file + '.h5')
# New Predicitons
df_metrics = Prio.predict(pickle_file=None)
plot_single(df_metrics)
plot_metric(df_metrics, name='Plot_Metrics/' + model_file + '.png')
def get_df_metrics():
"""
Collect all pickle files containing metrics and transform them into dataframe
:return: df: pd.Dataframe
"""
DATA_DIR = 'metrics'
search_pattern = '*.pkl'
filename = 'stats'
iteration_results = glob.glob(os.path.join(DATA_DIR, search_pattern))
aggregated_results = os.path.join(DATA_DIR, filename)
df = load_stats_dataframe(iteration_results, aggregated_results)
print(f'Dataframe {df}')
return df
def new_model(D: DataCI, params: dict, model_file: str, save: bool = False, load: bool = False):
"""
Train or load existing model.
:param D:
:param params:
:param model_file:
:param save:
:param load:
:return:
"""
if load:
# Load existing trained model
return NNEmbeddings(D=D, load=load, model_file=model_file)
else:
# Train New Model
return NNEmbeddings(D, embedding_size=params['embedding_size'], negative_ratio=params['negative_ratio'],
nb_epochs=params['nb_epochs'], batch_size=params['batch_size'],
optimizer=params['optimizer'],
classification=params['classification'], save=save, model_file=model_file)
def model(Prio: NNEmbeddings, plot_emb: bool = False, pickle_file: str = None):
"""
Make predictions and plots on unseen data.
:param Prio:
:param plot_emb:
:return:
"""
# New Predicitons
df_metrics = Prio.predict(pickle_file=pickle_file)
plot_single(df_metrics)
if plot_emb:
# TSNE Plots
Prio.plot_embeddings()
Prio.plot_embeddings_labeled(layer='tests')
Prio.plot_embeddings_labeled(layer='files')
# UMAP Plots
Prio.plot_embeddings(method='UMAP')
Prio.plot_embeddings_labeled(layer='tests', method='UMAP')
Prio.plot_embeddings_labeled(layer='files', method='UMAP')
def main():
commits = pd.read_csv('../pub_data/test_commits_pub.csv', encoding='latin-1', sep='\t')
test_details = | pd.read_csv('../pub_data/test_details_pub.csv', sep='\t') | pandas.read_csv |
"""
Author : <NAME>, <NAME>, <NAME>
Class : HMC CS 158
Date : 2018 April 2
Description : Utilities
"""
import pandas as pd
import numpy as np
import collections
from string import punctuation
from sklearn.svm import SVC
from sklearn.utils import shuffle
from sklearn import metrics, preprocessing
from sklearn.model_selection import KFold
from nltk.corpus import wordnet
from nltk.stem import PorterStemmer
#import Stemmer
from nltk.tokenize import RegexpTokenizer, sent_tokenize, word_tokenize
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
import text_processing
from sklearn.model_selection import train_test_split
# path_data = "../data/"
path_data = ""
def read_dataSet():
file_name = path_data + "facebook-fact-check.csv"
return pd.read_csv(file_name)
def read_merged():
return pd.read_csv(path_data+"merged.csv")
def uniqueAccount():
data = read_dataSet()
return np.unique(data.account_id)
# postInfo
# returns a list of tuples containing (account_id, post_id)
# removes https://www.facebook.com/FreedomDailyNews
def postInfo():
data = read_dataSet()
resultList = []
for i in range(len(data.account_id)):
if data.account_id[i]!= 440106476051475:
tup = (data.account_id[i], data.post_id[i])
resultList.append(tup)
return resultList
# column_list: list of columns that you want to be non-empty
# df: pandas DataFrame
# deprecated `logical` argument: whether all of the listed columns or just one of them must be non-empty
#     ex) 'and' if you want both to be non-empty
# returns the rows that satisfy the condition
def clear_rows(column_list, df):
if len(column_list) < 2:
return df[ | pd.notnull(df[column_list[0]]) | pandas.notnull |
import gym
import numpy as np
import torch
import stable_baselines3 as sb3
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.env_util import make_vec_env
import pybullet_envs
import pandas as pd
import pickle
import os
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='whitegrid', palette=[sns.color_palette('colorblind')[i] for i in [0,3,4,2]])
np.set_printoptions(suppress=True, linewidth=100, precision=4)
pd.set_option('precision', 4)
gym.logger.set_level(40)
plt.rcParams['font.family'] = 'monospace'
plt.rcParams['font.weight'] = 'bold'
class RLBanditEnv:
'''
numerical experiment where the policies are trained on rl environments and
then compared in the bandit setting via various policy evaluation methods
'''
def __init__(self, params):
self.__dict__.update(params)
self.make_env()
def make_env(self):
'''create the environment'''
try:
self.env = gym.make(self.env_name)
except:
self.env = make_vec_env(self.env_name, n_envs=1)
self.low = self.env.action_space.low
self.high = self.env.action_space.high
def train_target_policies(self, seed=None):
'''train policies to be ranked'''
if seed is not None:
np.random.seed(seed)
torch.manual_seed(seed)
self.env.seed(seed)
self.env.action_space.seed(seed)
models = {
'A2C': sb3.A2C('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'DDPG': sb3.DDPG('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'PPO': sb3.PPO('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'SAC': sb3.SAC('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'TD3': sb3.TD3('MlpPolicy', self.env, seed=seed).learn(self.train_steps)}
self.target_policies = {name: model.policy for name, model in models.items()}
self.num_policy_pairs = len(models) * (len(models) - 1) / 2
def evaluate_policy_rl(self, policy, num_sims=10):
'''evaluate policy in rl environment'''
reward_avg, reward_std = evaluate_policy(policy, self.env, n_eval_episodes=num_sims,
deterministic=False, warn=False)
return reward_avg, reward_std
def estimate_policy_value(self, policy, num_sims, seed=None):
'''estimate policy value in bandit environment'''
policy_value = 0
for _ in range(num_sims):
if seed is not None:
self.env.seed(seed)
obs = self.env.reset()
for t in range(self.env_steps):
action, _ = policy.predict(obs, deterministic=False)
obs, reward, done, _ = self.env.step(action)
policy_value += reward
if done:
break
policy_value /= num_sims
return policy_value
def evaluate_target_policies(self, num_sims=100):
'''evaluate target policies in bandit environment'''
self.value_true = {}
for name, policy in self.target_policies.items():
self.value_true[name] = self.estimate_policy_value(policy, num_sims)
def probability_proxy(self, action1, action2):
'''compute probability of taking action1 instead of action2'''
action_delta = (action1 - action2) / (self.high - self.low)
prob = np.exp((1 - 1 / (1 - action_delta**2 + 1e-08)).mean())
return prob
def generate_historical_data(self):
'''sample historical data by deploying target policies'''
self.historical_data, self.value_emp = [], {}
for name, policy in self.target_policies.items():
self.value_emp[name] = 0
seed = np.random.randint(1e+06)
self.env.seed(seed)
obs = self.env.reset()
actions, value, prob = [], 0, 1
for t in range(self.env_steps):
action, _ = policy.predict(obs, deterministic=False)
actions.append(action)
action_det, _ = policy.predict(obs, deterministic=True)
prob *= self.probability_proxy(action, action_det)
obs, reward, done, _ = self.env.step(action)
value += reward
if done:
break
self.historical_data.append([seed, actions, value, prob])
self.value_emp[name] += value
self.rho = np.mean(list(self.value_emp.values()))
def estimate_trajectory_probability(self, policy, trajectory):
'''estimate proability that the policy follows the trajectory'''
prob = 1.
seed, actions, _, _ = trajectory
self.env.seed(seed)
obs = self.env.reset()
for t in range(min(self.env_steps, len(actions))):
action, _ = policy.predict(obs, deterministic=True)
prob *= self.probability_proxy(action, actions[t])
obs, _, done, _ = self.env.step(action)
return prob
def compute_value_dim(self, policy):
'''evaluate the policy via the direct method'''
value_dim = []
for trajectory in self.historical_data:
s, a, r, _ = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_dim.append(r * prob)
return np.mean(value_dim)
def compute_value_lde(self, policy):
'''evaluate the policy via the limited data estimator'''
value_lde = []
for trajectory in self.historical_data:
s, a, r, _ = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_lde.append((r - self.rho) * prob + self.rho)
return np.mean(value_lde)
def compute_value_dre(self, policy):
'''evaluate the policy via the doubly robust estimator'''
value_dre = []
for trajectory in self.historical_data:
s, a, r, p = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_dre.append((r - self.rho) * prob / (p + 1e-06) + self.rho)
return np.mean(value_dre)
def compute_value_ips(self, policy):
'''evaluate the policy via the inverse propensity scoring'''
value_ips = []
for trajectory in self.historical_data:
s, a, r, p = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_ips.append(r * prob / (p + 1e-06))
return np.mean(value_ips)
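    # Added summary (hedged): writing p_hat for the trajectory probability estimated under
    # the evaluated policy, p for the logged behaviour-probability proxy, r for the logged
    # return and rho for the mean empirical return, the four estimators above compute
    #   DiM: mean(r * p_hat)
    #   LDE: mean((r - rho) * p_hat + rho)
    #   DRE: mean((r - rho) * p_hat / p + rho)
    #   IPS: mean(r * p_hat / p)
    # (the 1e-06 terms only guard against division by zero).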
def swap_count(self, array1, array2):
'''count the number of swaps required to transform array1 into array2'''
L = list(array2)
swaps = 0
for element in list(array1):
ind = L.index(element)
L.pop(ind)
swaps += ind
return swaps
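    # Added note: swap_count returns the number of adjacent transpositions needed to turn
    # array2 into array1 (the Kendall-tau distance between the two orderings), so 0 means
    # the two rankings agree exactly.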
def rank_target_policies(self):
'''evaluate and rank target policies via various methods'''
self.value_dim, self.value_lde, self.value_dre, self.value_ips = {}, {}, {}, {}
for name, policy in self.target_policies.items():
self.value_lde[name] = self.compute_value_lde(policy)
self.value_dre[name] = self.compute_value_dre(policy)
self.value_ips[name] = self.compute_value_ips(policy)
self.value_dim[name] = self.compute_value_dim(policy)
self.method_values = {'True': self.value_true, 'LDE': self.value_lde,
'DRE': self.value_dre, 'IPS': self.value_ips,
'DiM': self.value_dim, 'Emp': self.value_emp}
self.values = | pd.DataFrame.from_dict(self.method_values) | pandas.DataFrame.from_dict |
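    # Hedged usage sketch (added): the enclosing class name and constructor are not shown
    # in this fragment, so `PolicyRanker(env, env_steps, train_steps)` below is only an
    # assumed interface used for illustration.
    #
    #   ranker = PolicyRanker(env, env_steps=200, train_steps=10_000)
    #   ranker.train_target_policies(seed=0)
    #   ranker.evaluate_target_policies(num_sims=100)
    #   ranker.generate_historical_data()
    #   ranker.rank_target_policies()
    #   print(ranker.values)  # per-policy estimates for True/LDE/DRE/IPS/DiM/Emp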
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 11:11:57 2018
@author: kazuki.onodera
-d- -> /
-x- -> *
-p- -> +
-m- -> -
nohup python -u 000.py 0 > LOG/log_000.py_0.txt &
nohup python -u 000.py 1 > LOG/log_000.py_1.txt &
nohup python -u 000.py 2 > LOG/log_000.py_2.txt &
nohup python -u 000.py 3 > LOG/log_000.py_3.txt &
nohup python -u 000.py 4 > LOG/log_000.py_4.txt &
nohup python -u 000.py 5 > LOG/log_000.py_5.txt &
nohup python -u 000.py 6 > LOG/log_000.py_6.txt &
"""
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
NTHREAD = cpu_count()
from itertools import combinations
from tqdm import tqdm
import sys
argv = sys.argv
import os, utils, gc
utils.start(__file__)
#==============================================================================
folders = [
# '../data',
'../feature', '../feature_unused',
# '../feature_var0', '../feature_corr1'
]
for fol in folders:
os.system(f'rm -rf {fol}')
os.system(f'mkdir {fol}')
col_app_money = ['app_AMT_INCOME_TOTAL', 'app_AMT_CREDIT', 'app_AMT_ANNUITY', 'app_AMT_GOODS_PRICE']
col_app_day = ['app_DAYS_BIRTH', 'app_DAYS_EMPLOYED', 'app_DAYS_REGISTRATION', 'app_DAYS_ID_PUBLISH', 'app_DAYS_LAST_PHONE_CHANGE']
def get_trte():
usecols = ['SK_ID_CURR', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE']
usecols += ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'DAYS_LAST_PHONE_CHANGE']
rename_di = {
'AMT_INCOME_TOTAL': 'app_AMT_INCOME_TOTAL',
'AMT_CREDIT': 'app_AMT_CREDIT',
'AMT_ANNUITY': 'app_AMT_ANNUITY',
'AMT_GOODS_PRICE': 'app_AMT_GOODS_PRICE',
'DAYS_BIRTH': 'app_DAYS_BIRTH',
'DAYS_EMPLOYED': 'app_DAYS_EMPLOYED',
'DAYS_REGISTRATION': 'app_DAYS_REGISTRATION',
'DAYS_ID_PUBLISH': 'app_DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE': 'app_DAYS_LAST_PHONE_CHANGE',
}
trte = pd.concat([pd.read_csv('../input/application_train.csv.zip', usecols=usecols).rename(columns=rename_di),
pd.read_csv('../input/application_test.csv.zip', usecols=usecols).rename(columns=rename_di)],
ignore_index=True)
return trte
def prep_prev(df):
df['AMT_APPLICATION'].replace(0, np.nan, inplace=True)
df['AMT_CREDIT'].replace(0, np.nan, inplace=True)
df['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
df['AMT_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'AMT_DOWN_PAYMENT'] = np.nan
df['RATE_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'RATE_DOWN_PAYMENT'] = np.nan
# df['xxx'].replace(0, np.nan, inplace=True)
# df['xxx'].replace(0, np.nan, inplace=True)
return
p = int(argv[1])
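# Added comment: p selects which raw table this run processes, matching the branches
# below -- 0: application train/test, 1: previous_application (../data/prev_new_v4.csv.gz),
# 2: POS_CASH_balance, 3: installments_payments, 4: credit_card_balance, 5: bureau,
# 6: bureau_balance, 7: future applications (../data/future_application_v3.csv.gz).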
if True:
#def multi(p):
if p==0:
# =============================================================================
# application
# =============================================================================
def f1(df):
df['CODE_GENDER'] = 1 - (df['CODE_GENDER']=='F')*1 # 4 'XNA' are converted to 'M'
df['FLAG_OWN_CAR'] = (df['FLAG_OWN_CAR']=='Y')*1
df['FLAG_OWN_REALTY'] = (df['FLAG_OWN_REALTY']=='Y')*1
df['EMERGENCYSTATE_MODE'] = (df['EMERGENCYSTATE_MODE']=='Yes')*1
df['AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-AMT_ANNUITY'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] # how long should user pay?(month)
df['AMT_GOODS_PRICE-d-AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['AMT_ANNUITY']# how long should user pay?(month)
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE-m-AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['age_finish_payment'] = df['DAYS_BIRTH'].abs() + (df['AMT_CREDIT-d-AMT_ANNUITY']*30)
# df['age_finish_payment'] = (df['DAYS_BIRTH']/-365) + df['credit-d-annuity']
df.loc[df['DAYS_EMPLOYED']==365243, 'DAYS_EMPLOYED'] = np.nan
df['DAYS_EMPLOYED-m-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] - df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-m-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] - df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] - df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_ID_PUBLISH']
col = ['DAYS_EMPLOYED-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_BIRTH',
'DAYS_ID_PUBLISH-m-DAYS_BIRTH',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_EMPLOYED',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'
]
col_comb = list(combinations(col, 2))
for i,j in col_comb:
df[f'{i}-d-{j}'] = df[i] / df[j]
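            # Added note: the loop above emits one ratio feature per unordered pair of the
            # 10 day-gap columns in `col`, i.e. C(10, 2) = 45 additional columns.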
df['DAYS_EMPLOYED-d-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] / df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-d-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] / df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_ID_PUBLISH']
df['OWN_CAR_AGE-d-DAYS_BIRTH'] = (df['OWN_CAR_AGE']*(-365)) / df['DAYS_BIRTH']
df['OWN_CAR_AGE-m-DAYS_BIRTH'] = df['DAYS_BIRTH'] + (df['OWN_CAR_AGE']*365)
df['OWN_CAR_AGE-d-DAYS_EMPLOYED'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['OWN_CAR_AGE-m-DAYS_EMPLOYED'] = df['DAYS_EMPLOYED'] + (df['OWN_CAR_AGE']*365)
df['cnt_adults'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
df['CNT_CHILDREN-d-CNT_FAM_MEMBERS'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
df['income_per_adult'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
# df.loc[df['CNT_CHILDREN']==0, 'CNT_CHILDREN'] = np.nan
df['AMT_INCOME_TOTAL-d-CNT_CHILDREN'] = df['AMT_INCOME_TOTAL'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_CREDIT-d-CNT_CHILDREN'] = df['AMT_CREDIT'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_ANNUITY-d-CNT_CHILDREN'] = df['AMT_ANNUITY'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_GOODS_PRICE-d-CNT_CHILDREN'] = df['AMT_GOODS_PRICE'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_INCOME_TOTAL-d-cnt_adults'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
df['AMT_CREDIT-d-cnt_adults'] = df['AMT_CREDIT'] / df['cnt_adults']
df['AMT_ANNUITY-d-cnt_adults'] = df['AMT_ANNUITY'] / df['cnt_adults']
df['AMT_GOODS_PRICE-d-cnt_adults'] = df['AMT_GOODS_PRICE'] / df['cnt_adults']
df['AMT_INCOME_TOTAL-d-CNT_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['AMT_CREDIT-d-CNT_FAM_MEMBERS'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
df['AMT_ANNUITY-d-CNT_FAM_MEMBERS'] = df['AMT_ANNUITY'] / df['CNT_FAM_MEMBERS']
df['AMT_GOODS_PRICE-d-CNT_FAM_MEMBERS'] = df['AMT_GOODS_PRICE'] / df['CNT_FAM_MEMBERS']
# EXT_SOURCE_x
df['EXT_SOURCES_prod'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_sum'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].sum(axis=1)
df['EXT_SOURCES_sum'] = df['EXT_SOURCES_sum'].fillna(df['EXT_SOURCES_sum'].mean())
df['EXT_SOURCES_mean'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['EXT_SOURCES_mean'] = df['EXT_SOURCES_mean'].fillna(df['EXT_SOURCES_mean'].mean())
df['EXT_SOURCES_std'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['EXT_SOURCES_std'] = df['EXT_SOURCES_std'].fillna(df['EXT_SOURCES_std'].mean())
df['EXT_SOURCES_1-2-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_2-1-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-2'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2']
df['EXT_SOURCES_2-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
# =========
# https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features/code
# =========
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
# =========
# https://www.kaggle.com/poohtls/fork-of-fork-lightgbm-with-simple-features/code
# =========
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['alldocs_kurt'] = df[docs].kurtosis(axis=1)
df['alldocs_skew'] = df[docs].skew(axis=1)
df['alldocs_mean'] = df[docs].mean(axis=1)
df['alldocs_sum'] = df[docs].sum(axis=1)
df['alldocs_std'] = df[docs].std(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOYED_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
# =============================================================================
# Maxwell features
# =============================================================================
bdg_avg = df.filter(regex='_AVG$').columns
bdg_mode = df.filter(regex='_MODE$').columns
bdg_medi = df.filter(regex='_MEDI$').columns[:len(bdg_avg)] # ignore FONDKAPREMONT_MODE...
df['building_score_avg_mean'] = df[bdg_avg].mean(1)
df['building_score_avg_std'] = df[bdg_avg].std(1)
df['building_score_avg_sum'] = df[bdg_avg].sum(1)
df['building_score_mode_mean'] = df[bdg_mode].mean(1)
df['building_score_mode_std'] = df[bdg_mode].std(1)
df['building_score_mode_sum'] = df[bdg_mode].sum(1)
df['building_score_medi_mean'] = df[bdg_medi].mean(1)
df['building_score_medi_std'] = df[bdg_medi].std(1)
df['building_score_medi_sum'] = df[bdg_medi].sum(1)
df['maxwell_feature_1'] = (df['EXT_SOURCE_1'] * df['EXT_SOURCE_3']) ** (1 / 2)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
return
df = pd.read_csv('../input/application_train.csv.zip')
f1(df)
utils.to_pickles(df, '../data/train', utils.SPLIT_SIZE)
utils.to_pickles(df[['TARGET']], '../data/label', utils.SPLIT_SIZE)
df = pd.read_csv('../input/application_test.csv.zip')
f1(df)
utils.to_pickles(df, '../data/test', utils.SPLIT_SIZE)
df[['SK_ID_CURR']].to_pickle('../data/sub.p')
elif p==1:
# =============================================================================
# prev
# =============================================================================
"""
df = utils.read_pickles('../data/previous_application')
"""
df = pd.merge(pd.read_csv('../data/prev_new_v4.csv.gz'),
get_trte(), on='SK_ID_CURR', how='left')
# df = pd.merge(pd.read_csv('../input/previous_application.csv.zip'),
# get_trte(), on='SK_ID_CURR', how='left')
prep_prev(df)
df['FLAG_LAST_APPL_PER_CONTRACT'] = (df['FLAG_LAST_APPL_PER_CONTRACT']=='Y')*1
# day
for c in ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']:
df.loc[df[c]==365243, c] = np.nan
df['days_fdue-m-fdrw'] = df['DAYS_FIRST_DUE'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdrw'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue-m-fdrw'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DRAWING'] # total span
df['days_trm-m-fdrw'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdue'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-fdue'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DUE']
df['days_trm-m-fdue'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-ldue1'] = df['DAYS_LAST_DUE'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue1'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE']
# money
df['total_debt'] = df['AMT_ANNUITY'] * df['CNT_PAYMENT']
df['AMT_CREDIT-d-total_debt'] = df['AMT_CREDIT'] / df['total_debt']
df['AMT_GOODS_PRICE-d-total_debt'] = df['AMT_GOODS_PRICE'] / df['total_debt']
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
# app & money
df['AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-d-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-m-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] - df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] - df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] - df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] - df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_CREDIT'] = df['AMT_ANNUITY'] / df['app_AMT_CREDIT']
df['AMT_APPLICATION-d-app_AMT_CREDIT'] = df['AMT_APPLICATION'] / df['app_AMT_CREDIT']
df['AMT_CREDIT-d-app_AMT_CREDIT'] = df['AMT_CREDIT'] / df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-d-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT'] = df['AMT_ANNUITY'] - df['app_AMT_CREDIT']
df['AMT_APPLICATION-m-app_AMT_CREDIT'] = df['AMT_APPLICATION'] - df['app_AMT_CREDIT']
df['AMT_CREDIT-m-app_AMT_CREDIT'] = df['AMT_CREDIT'] - df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] / df['app_AMT_ANNUITY']
df['AMT_APPLICATION-d-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] / df['app_AMT_ANNUITY']
df['AMT_CREDIT-d-app_AMT_ANNUITY'] = df['AMT_CREDIT'] / df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-d-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']
df['AMT_APPLICATION-m-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']
df['AMT_CREDIT-m-app_AMT_ANNUITY'] = df['AMT_CREDIT'] - df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] / df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-d-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] / df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-d-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] / df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] / df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
# nejumi
f_name='nejumi'; init_rate=0.9; n_iter=500
df['AMT_ANNUITY_d_AMT_CREDIT_temp'] = df.AMT_ANNUITY / df.AMT_CREDIT
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + init_rate)**df.CNT_PAYMENT - 1)/((1 + init_rate)**df.CNT_PAYMENT)
for i in range(n_iter):
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + df[f_name])**df.CNT_PAYMENT - 1)/((1 + df[f_name])**df.CNT_PAYMENT)
df.drop(['AMT_ANNUITY_d_AMT_CREDIT_temp'], axis=1, inplace=True)
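        # Added explanatory comment: the loop above is a fixed-point iteration for the
        # per-month interest rate r implied by the annuity formula
        #   AMT_ANNUITY / AMT_CREDIT = r * (1 + r)**n / ((1 + r)**n - 1), with n = CNT_PAYMENT,
        # iterated as r <- (AMT_ANNUITY / AMT_CREDIT) * (1 - (1 + r)**-n) from init_rate,
        # so after n_iter sweeps 'nejumi' approximates that implied rate.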
df.sort_values(['SK_ID_CURR', 'DAYS_DECISION'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = [
'total_debt',
'AMT_CREDIT-d-total_debt',
'AMT_GOODS_PRICE-d-total_debt',
'AMT_GOODS_PRICE-d-AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-d-app_AMT_CREDIT',
'AMT_APPLICATION-d-app_AMT_CREDIT',
'AMT_CREDIT-d-app_AMT_CREDIT',
'AMT_GOODS_PRICE-d-app_AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_ANNUITY',
'AMT_APPLICATION-d-app_AMT_ANNUITY',
'AMT_CREDIT-d-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY',
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT-d-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_CREDIT',
'AMT_APPLICATION-m-app_AMT_CREDIT',
'AMT_CREDIT-m-app_AMT_CREDIT',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT',
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_ANNUITY',
'AMT_APPLICATION-m-app_AMT_ANNUITY',
'AMT_CREDIT-m-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY',
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'nejumi'
]
def multi_prev(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
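        # Added note: multi_prev computes, per SK_ID_CURR and in DAYS_DECISION order, the
        # first difference of column c and a (previous - current) / previous change ratio.
        # A vectorised sketch of the same idea (assumption: the explicit loop is kept so
        # each column can run in its own process) would be:
        #   g = df.groupby('SK_ID_CURR')[c]
        #   diff = g.diff()
        #   pctchange = -g.pct_change()   # sign flipped to match (x_bk - x) / x_bk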
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_prev, col), axis=1)
print('===== PREV ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
# app & day
col_prev = ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']
for c1 in col_prev:
for c2 in col_app_day:
# print(f"'{c1}-m-{c2}',")
df[f'{c1}-m-{c2}'] = df[c1] - df[c2]
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
df['cnt_paid'] = df.apply(lambda x: min( np.ceil(x['DAYS_FIRST_DUE']/-30), x['CNT_PAYMENT'] ), axis=1)
df['cnt_paid_ratio'] = df['cnt_paid'] / df['CNT_PAYMENT']
df['cnt_unpaid'] = df['CNT_PAYMENT'] - df['cnt_paid']
df['amt_paid'] = df['AMT_ANNUITY'] * df['cnt_paid']
# df['amt_paid_ratio'] = df['amt_paid'] / df['total_debt'] # same as cnt_paid_ratio
df['amt_unpaid'] = df['total_debt'] - df['amt_paid']
df['active'] = (df['cnt_unpaid']>0)*1
df['completed'] = (df['cnt_unpaid']==0)*1
# future payment
df_tmp = pd.DataFrame()
print('future payment')
rem_max = df['cnt_unpaid'].max() # 80
# rem_max = 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid']
for i in range(int( rem_max )):
c = f'future_payment_{i+1}m'
df_tmp[c] = df['cnt_unpaid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_unpaid_tmp'] -= 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid_tmp'].map(lambda x: max(x, 0))
# df['prev_future_payment_max'] = df.filter(regex='^prev_future_payment_').max(1)
del df['cnt_unpaid_tmp']
df = pd.concat([df, df_tmp], axis=1)
# past payment
df_tmp = pd.DataFrame()
print('past payment')
rem_max = df['cnt_paid'].max() # 72
df['cnt_paid_tmp'] = df['cnt_paid']
for i in range(int( rem_max )):
c = f'past_payment_{i+1}m'
df_tmp[c] = df['cnt_paid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_paid_tmp'] -= 1
df['cnt_paid_tmp'] = df['cnt_paid_tmp'].map(lambda x: max(x, 0))
# df['prev_past_payment_max'] = df.filter(regex='^prev_past_payment_').max(1)
del df['cnt_paid_tmp']
df = pd.concat([df, df_tmp], axis=1)
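        # Added explanatory comment: the two blocks above unroll each previous loan into
        # monthly columns -- future_payment_{i}m holds AMT_ANNUITY while at least i
        # instalments remain unpaid (NaN afterwards) and past_payment_{i}m does the same
        # for instalments already paid -- so later per-SK_ID_CURR aggregations can use the
        # month-by-month schedule rather than only its totals.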
df['APP_CREDIT_PERC'] = df['AMT_APPLICATION'] / df['AMT_CREDIT']
#df.filter(regex='^amt_future_payment_')
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/previous_application', utils.SPLIT_SIZE)
elif p==2:
# =============================================================================
# POS
# =============================================================================
"""
df = utils.read_pickles('../data/POS_CASH_balance')
"""
df = pd.read_csv('../input/POS_CASH_balance.csv.zip')
# data cleansing!!!
## drop signed. sample SK_ID_PREV==1769939
df = df[df.NAME_CONTRACT_STATUS!='Signed']
## Zombie NAME_CONTRACT_STATUS=='Completed' and CNT_INSTALMENT_FUTURE!=0. 1134377
df.loc[(df.NAME_CONTRACT_STATUS=='Completed') & (df.CNT_INSTALMENT_FUTURE!=0), 'NAME_CONTRACT_STATUS'] = 'Active'
## CNT_INSTALMENT_FUTURE=0 and Active. sample SK_ID_PREV==1998905, 2174168
df.loc[(df.CNT_INSTALMENT_FUTURE==0) & (df.NAME_CONTRACT_STATUS=='Active'), 'NAME_CONTRACT_STATUS'] = 'Completed'
## remove duplicated CNT_INSTALMENT_FUTURE=0. sample SK_ID_PREV==2601827
        df_0 = df[df['CNT_INSTALMENT_FUTURE']==0].copy()  # copy to avoid chained-assignment warnings below
        df_1 = df[df['CNT_INSTALMENT_FUTURE']>0]
        df_0['NAME_CONTRACT_STATUS'] = 'Completed'
df_0.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], ascending=[True, False], inplace=True)
df_0.drop_duplicates('SK_ID_PREV', keep='last', inplace=True)
df = pd.concat([df_0, df_1], ignore_index=True)
del df_0, df_1; gc.collect()
# TODO: end in active. 1002879
# df['CNT_INSTALMENT_FUTURE_min'] = df.groupby('SK_ID_PREV').CNT_INSTALMENT_FUTURE.transform('min')
# df['MONTHS_BALANCE_max'] = df.groupby('SK_ID_PREV').MONTHS_BALANCE.transform('max')
# df.loc[(df.CNT_INSTALMENT_FUTURE_min!=0) & (df.MONTHS_BALANCE_max!=-1)]
df['CNT_INSTALMENT-m-CNT_INSTALMENT_FUTURE'] = df['CNT_INSTALMENT'] - df['CNT_INSTALMENT_FUTURE']
df['CNT_INSTALMENT_FUTURE-d-CNT_INSTALMENT'] = df['CNT_INSTALMENT_FUTURE'] / df['CNT_INSTALMENT']
df.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = ['CNT_INSTALMENT_FUTURE', 'SK_DPD', 'SK_DPD_DEF']
def multi_pos(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_PREV', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_pos, col), axis=1)
print('===== POS ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
df['SK_DPD-m-SK_DPD_DEF'] = df['SK_DPD'] - df['SK_DPD_DEF']
# df['SK_DPD_diff_over0'] = (df['SK_DPD_diff']>0)*1
# df['SK_DPD_diff_over5'] = (df['SK_DPD_diff']>5)*1
# df['SK_DPD_diff_over10'] = (df['SK_DPD_diff']>10)*1
# df['SK_DPD_diff_over15'] = (df['SK_DPD_diff']>15)*1
# df['SK_DPD_diff_over20'] = (df['SK_DPD_diff']>20)*1
# df['SK_DPD_diff_over25'] = (df['SK_DPD_diff']>25)*1
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/POS_CASH_balance', utils.SPLIT_SIZE)
elif p==3:
# =============================================================================
# ins
# =============================================================================
"""
df = utils.read_pickles('../data/installments_payments')
"""
df = pd.read_csv('../input/installments_payments.csv.zip')
trte = get_trte()
df = pd.merge(df, trte, on='SK_ID_CURR', how='left')
prev = pd.read_csv('../input/previous_application.csv.zip',
usecols=['SK_ID_PREV', 'CNT_PAYMENT', 'AMT_ANNUITY'])
prev['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
# prep_prev(prev)
df = pd.merge(df, prev, on='SK_ID_PREV', how='left')
del trte, prev; gc.collect()
df['month'] = (df['DAYS_ENTRY_PAYMENT']/30).map(np.floor)
# app
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_BIRTH'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_BIRTH']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_EMPLOYED'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_EMPLOYED']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_REGISTRATION'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_REGISTRATION']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_ID_PUBLISH'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_ID_PUBLISH']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_LAST_PHONE_CHANGE'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_LAST_PHONE_CHANGE']
df['AMT_PAYMENT-d-app_AMT_INCOME_TOTAL'] = df['AMT_PAYMENT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_PAYMENT-d-app_AMT_CREDIT'] = df['AMT_PAYMENT'] / df['app_AMT_CREDIT']
df['AMT_PAYMENT-d-app_AMT_ANNUITY'] = df['AMT_PAYMENT'] / df['app_AMT_ANNUITY']
df['AMT_PAYMENT-d-app_AMT_GOODS_PRICE'] = df['AMT_PAYMENT'] / df['app_AMT_GOODS_PRICE']
# prev
df['NUM_INSTALMENT_ratio'] = df['NUM_INSTALMENT_NUMBER'] / df['CNT_PAYMENT']
df['AMT_PAYMENT-d-AMT_ANNUITY'] = df['AMT_PAYMENT'] / df['AMT_ANNUITY']
df['days_delayed_payment'] = df['DAYS_ENTRY_PAYMENT'] - df['DAYS_INSTALMENT']
df['amt_ratio'] = df['AMT_PAYMENT'] / df['AMT_INSTALMENT']
df['amt_delta'] = df['AMT_INSTALMENT'] - df['AMT_PAYMENT']
df['days_weighted_delay'] = df['amt_ratio'] * df['days_delayed_payment']
# Days past due and days before due (no negative values)
df['DPD'] = df['DAYS_ENTRY_PAYMENT'] - df['DAYS_INSTALMENT']
df['DBD'] = df['DAYS_INSTALMENT'] - df['DAYS_ENTRY_PAYMENT']
df['DPD'] = df['DPD'].apply(lambda x: x if x > 0 else 0)
df['DBD'] = df['DBD'].apply(lambda x: x if x > 0 else 0)
decay = 0.0003 # decay rate per a day
feature = f'days_weighted_delay_tsw3' # Time Series Weight
df[feature] = df['days_weighted_delay'] * (1 + (df['DAYS_ENTRY_PAYMENT']*decay) )
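        # Added note: DAYS_ENTRY_PAYMENT is negative (days before application), so the
        # factor (1 + DAYS_ENTRY_PAYMENT * decay) linearly down-weights older payments
        # (about 0.0003 per day, i.e. roughly 11% per year) before aggregation.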
# df_tmp = pd.DataFrame()
# for i in range(0, 50, 5):
# c1 = f'delayed_day_over{i}'
# df_tmp[c1] = (df['days_delayed_payment']>i)*1
#
# c2 = f'delayed_money_{i}'
# df_tmp[c2] = df_tmp[c1] * df.AMT_PAYMENT
#
# c3 = f'delayed_money_ratio_{i}'
# df_tmp[c3] = df_tmp[c1] * df.amt_ratio
#
# c1 = f'not-delayed_day_{i}'
# df_tmp[c1] = (df['days_delayed_payment']<=i)*1
#
# c2 = f'not-delayed_money_{i}'
# df_tmp[c2] = df_tmp[c1] * df.AMT_PAYMENT
#
# c3 = f'not-delayed_money_ratio_{i}'
# df_tmp[c3] = df_tmp[c1] * df.amt_ratio
#
# df = pd.concat([df, df_tmp], axis=1)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/installments_payments', utils.SPLIT_SIZE)
utils.to_pickles(df[df['days_delayed_payment']>0].reset_index(drop=True),
'../data/installments_payments_delay', utils.SPLIT_SIZE)
utils.to_pickles(df[df['days_delayed_payment']<=0].reset_index(drop=True),
'../data/installments_payments_notdelay', utils.SPLIT_SIZE)
elif p==4:
# =============================================================================
# credit card
# =============================================================================
"""
df = utils.read_pickles('../data/credit_card_balance')
"""
df = pd.read_csv('../input/credit_card_balance.csv.zip')
df = pd.merge(df, get_trte(), on='SK_ID_CURR', how='left')
df[col_app_day] = df[col_app_day]/30
# app
df['AMT_BALANCE-d-app_AMT_INCOME_TOTAL'] = df['AMT_BALANCE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_BALANCE-d-app_AMT_CREDIT'] = df['AMT_BALANCE'] / df['app_AMT_CREDIT']
df['AMT_BALANCE-d-app_AMT_ANNUITY'] = df['AMT_BALANCE'] / df['app_AMT_ANNUITY']
df['AMT_BALANCE-d-app_AMT_GOODS_PRICE'] = df['AMT_BALANCE'] / df['app_AMT_GOODS_PRICE']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_CREDIT']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_ANNUITY']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_GOODS_PRICE']
for c in col_app_day:
print(f'MONTHS_BALANCE-m-{c}')
df[f'MONTHS_BALANCE-m-{c}'] = df['MONTHS_BALANCE'] - df[c]
df['AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL'] = df['AMT_BALANCE'] / df['AMT_CREDIT_LIMIT_ACTUAL']
df['AMT_BALANCE-d-AMT_DRAWINGS_CURRENT'] = df['AMT_BALANCE'] / df['AMT_DRAWINGS_CURRENT']
df['AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL'] = df['AMT_DRAWINGS_CURRENT'] / df['AMT_CREDIT_LIMIT_ACTUAL']
df['AMT_TOTAL_RECEIVABLE-m-AMT_RECEIVABLE_PRINCIPAL'] = df['AMT_TOTAL_RECEIVABLE'] - df['AMT_RECEIVABLE_PRINCIPAL']
df['AMT_RECEIVABLE_PRINCIPAL-d-AMT_TOTAL_RECEIVABLE'] = df['AMT_RECEIVABLE_PRINCIPAL'] / df['AMT_TOTAL_RECEIVABLE']
df['SK_DPD-m-SK_DPD_DEF'] = df['SK_DPD'] - df['SK_DPD_DEF']
df['SK_DPD-m-SK_DPD_DEF_over0'] = (df['SK_DPD-m-SK_DPD_DEF']>0)*1
df['SK_DPD-m-SK_DPD_DEF_over5'] = (df['SK_DPD-m-SK_DPD_DEF']>5)*1
df['SK_DPD-m-SK_DPD_DEF_over10'] = (df['SK_DPD-m-SK_DPD_DEF']>10)*1
df['SK_DPD-m-SK_DPD_DEF_over15'] = (df['SK_DPD-m-SK_DPD_DEF']>15)*1
df['SK_DPD-m-SK_DPD_DEF_over20'] = (df['SK_DPD-m-SK_DPD_DEF']>20)*1
df['SK_DPD-m-SK_DPD_DEF_over25'] = (df['SK_DPD-m-SK_DPD_DEF']>25)*1
col = ['AMT_BALANCE', 'AMT_CREDIT_LIMIT_ACTUAL', 'AMT_DRAWINGS_ATM_CURRENT',
'AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_OTHER_CURRENT',
'AMT_DRAWINGS_POS_CURRENT', 'AMT_INST_MIN_REGULARITY',
'AMT_PAYMENT_CURRENT', 'AMT_PAYMENT_TOTAL_CURRENT',
'AMT_RECEIVABLE_PRINCIPAL', 'AMT_RECIVABLE', 'AMT_TOTAL_RECEIVABLE',
'CNT_DRAWINGS_ATM_CURRENT', 'CNT_DRAWINGS_CURRENT',
'CNT_DRAWINGS_OTHER_CURRENT', 'CNT_DRAWINGS_POS_CURRENT',
'CNT_INSTALMENT_MATURE_CUM', 'SK_DPD',
'SK_DPD_DEF', 'AMT_BALANCE-d-app_AMT_INCOME_TOTAL',
'AMT_BALANCE-d-app_AMT_CREDIT', 'AMT_BALANCE-d-app_AMT_ANNUITY',
'AMT_BALANCE-d-app_AMT_GOODS_PRICE', 'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL',
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT', 'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY',
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE', 'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL',
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL',
'AMT_TOTAL_RECEIVABLE-m-AMT_RECEIVABLE_PRINCIPAL',
'AMT_RECEIVABLE_PRINCIPAL-d-AMT_TOTAL_RECEIVABLE'
]
df.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], inplace=True)
df.reset_index(drop=True, inplace=True)
def multi_cre(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_PREV', c]].values:
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback1 = pd.concat(pool.map(multi_cre, col), axis=1)
print('===== CRE ====')
col = callback1.columns.tolist()
print(col)
pool.close()
# callback1['SK_ID_PREV'] = df['SK_ID_PREV']
df = pd.concat([df, callback1], axis=1)
del callback1; gc.collect()
pool = Pool(10)
callback2 = pd.concat(pool.map(multi_cre, col), axis=1)
print('===== CRE ====')
col = callback2.columns.tolist()
print(col)
pool.close()
df = pd.concat([df, callback2], axis=1)
del callback2; gc.collect()
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/credit_card_balance', utils.SPLIT_SIZE)
elif p==5:
# =============================================================================
# bureau
# =============================================================================
df = pd.read_csv('../input/bureau.csv.zip')
df = pd.merge(df, get_trte(), on='SK_ID_CURR', how='left')
col_bure_money = ['AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT',
'AMT_CREDIT_SUM_LIMIT', 'AMT_CREDIT_SUM_OVERDUE']
col_bure_day = ['DAYS_CREDIT', 'DAYS_CREDIT_ENDDATE', 'DAYS_ENDDATE_FACT']
# app
for c1 in col_bure_money:
for c2 in col_app_money:
# print(f"'{c1}-d-{c2}',")
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
for c1 in col_bure_day:
for c2 in col_app_day:
# print(f"'{c1}-m-{c2}',")
df[f'{c1}-m-{c2}'] = df[c1] - df[c2]
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
df['DAYS_CREDIT_ENDDATE-m-DAYS_CREDIT'] = df['DAYS_CREDIT_ENDDATE'] - df['DAYS_CREDIT']
df['DAYS_ENDDATE_FACT-m-DAYS_CREDIT'] = df['DAYS_ENDDATE_FACT'] - df['DAYS_CREDIT']
df['DAYS_ENDDATE_FACT-m-DAYS_CREDIT_ENDDATE'] = df['DAYS_ENDDATE_FACT'] - df['DAYS_CREDIT_ENDDATE']
df['DAYS_CREDIT_UPDATE-m-DAYS_CREDIT'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_CREDIT']
df['DAYS_CREDIT_UPDATE-m-DAYS_CREDIT_ENDDATE'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_CREDIT_ENDDATE']
df['DAYS_CREDIT_UPDATE-m-DAYS_ENDDATE_FACT'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_ENDDATE_FACT']
df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT'] = df['AMT_CREDIT_SUM'] - df['AMT_CREDIT_SUM_DEBT']
df['AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM'] = df['AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM']
df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM_DEBT'] + df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM-d-debt-p-AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM'] / df['AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT']
col = ['AMT_CREDIT_MAX_OVERDUE', 'CNT_CREDIT_PROLONG',
'AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_OVERDUE', 'DAYS_CREDIT_UPDATE',
'AMT_ANNUITY', 'AMT_CREDIT_SUM-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM-d-app_AMT_CREDIT', 'AMT_CREDIT_SUM-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT',
'AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM',
'AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM-d-debt-p-AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'
]
df.sort_values(['SK_ID_CURR', 'DAYS_CREDIT'], inplace=True)
df.reset_index(drop=True, inplace=True)
def multi_b(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_b, col), axis=1)
print('===== bureau ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/bureau', utils.SPLIT_SIZE)
elif p==6:
# =============================================================================
# bureau_balance
# =============================================================================
df = pd.read_csv('../input/bureau_balance.csv.zip')
df.sort_values(['SK_ID_BUREAU', 'MONTHS_BALANCE'], inplace=True)
df = pd.get_dummies(df, columns=['STATUS'])
df.reset_index(drop=True, inplace=True)
# def multi_bb(c):
# ret_diff = []
# ret_pctchng = []
# key_bk = x_bk = None
# for key, x in df[['SK_ID_BUREAU', c]].values:
#
# if key_bk is None:
# ret_diff.append(None)
# ret_pctchng.append(None)
# else:
# if key_bk == key:
# ret_diff.append(x - x_bk)
# ret_pctchng.append( (x_bk-x) / x_bk)
# else:
# ret_diff.append(None)
# ret_pctchng.append(None)
# key_bk = key
# x_bk = x
#
# ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
# ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
# ret = pd.concat([ret_diff, ret_pctchng], axis=1)
#
# return ret
#
# pool = Pool(len(col))
# callback = pd.concat(pool.map(multi_bb, col), axis=1)
# print('===== bureau_balance ====')
# print(callback.columns.tolist())
# pool.close()
# df = pd.concat([df, callback], axis=1)
utils.to_pickles(df, '../data/bureau_balance', utils.SPLIT_SIZE)
elif p==7:
# =============================================================================
# future
# =============================================================================
df = pd.merge(pd.read_csv('../data/future_application_v3.csv.gz'),
get_trte(), on='SK_ID_CURR', how='left')
# df = pd.merge(pd.read_csv('../input/previous_application.csv.zip'),
# get_trte(), on='SK_ID_CURR', how='left')
prep_prev(df)
df['FLAG_LAST_APPL_PER_CONTRACT'] = (df['FLAG_LAST_APPL_PER_CONTRACT']=='Y')*1
# day
for c in ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']:
df.loc[df[c]==365243, c] = np.nan
df['days_fdue-m-fdrw'] = df['DAYS_FIRST_DUE'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdrw'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue-m-fdrw'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DRAWING'] # total span
df['days_trm-m-fdrw'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdue'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-fdue'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DUE']
df['days_trm-m-fdue'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-ldue1'] = df['DAYS_LAST_DUE'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue1'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE']
# money
df['total_debt'] = df['AMT_ANNUITY'] * df['CNT_PAYMENT']
df['AMT_CREDIT-d-total_debt'] = df['AMT_CREDIT'] / df['total_debt']
df['AMT_GOODS_PRICE-d-total_debt'] = df['AMT_GOODS_PRICE'] / df['total_debt']
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
# app & money
df['AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-d-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-m-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] - df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] - df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] - df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] - df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_CREDIT'] = df['AMT_ANNUITY'] / df['app_AMT_CREDIT']
df['AMT_APPLICATION-d-app_AMT_CREDIT'] = df['AMT_APPLICATION'] / df['app_AMT_CREDIT']
df['AMT_CREDIT-d-app_AMT_CREDIT'] = df['AMT_CREDIT'] / df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-d-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT'] = df['AMT_ANNUITY'] - df['app_AMT_CREDIT']
df['AMT_APPLICATION-m-app_AMT_CREDIT'] = df['AMT_APPLICATION'] - df['app_AMT_CREDIT']
df['AMT_CREDIT-m-app_AMT_CREDIT'] = df['AMT_CREDIT'] - df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] / df['app_AMT_ANNUITY']
df['AMT_APPLICATION-d-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] / df['app_AMT_ANNUITY']
df['AMT_CREDIT-d-app_AMT_ANNUITY'] = df['AMT_CREDIT'] / df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-d-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']
df['AMT_APPLICATION-m-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']
df['AMT_CREDIT-m-app_AMT_ANNUITY'] = df['AMT_CREDIT'] - df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] / df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-d-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] / df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-d-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] / df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] / df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
# nejumi
f_name='nejumi'; init_rate=0.9; n_iter=500
df['AMT_ANNUITY_d_AMT_CREDIT_temp'] = df.AMT_ANNUITY / df.AMT_CREDIT
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + init_rate)**df.CNT_PAYMENT - 1)/((1 + init_rate)**df.CNT_PAYMENT)
for i in range(n_iter):
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + df[f_name])**df.CNT_PAYMENT - 1)/((1 + df[f_name])**df.CNT_PAYMENT)
df.drop(['AMT_ANNUITY_d_AMT_CREDIT_temp'], axis=1, inplace=True)
df.sort_values(['SK_ID_CURR', 'DAYS_DECISION'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = [
'total_debt',
'AMT_CREDIT-d-total_debt',
'AMT_GOODS_PRICE-d-total_debt',
'AMT_GOODS_PRICE-d-AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-d-app_AMT_CREDIT',
'AMT_APPLICATION-d-app_AMT_CREDIT',
'AMT_CREDIT-d-app_AMT_CREDIT',
'AMT_GOODS_PRICE-d-app_AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_ANNUITY',
'AMT_APPLICATION-d-app_AMT_ANNUITY',
'AMT_CREDIT-d-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY',
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT-d-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_CREDIT',
'AMT_APPLICATION-m-app_AMT_CREDIT',
'AMT_CREDIT-m-app_AMT_CREDIT',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT',
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_ANNUITY',
'AMT_APPLICATION-m-app_AMT_ANNUITY',
'AMT_CREDIT-m-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY',
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'nejumi'
]
def multi_prev(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = | pd.concat([ret_diff, ret_pctchng], axis=1) | pandas.concat |
from __future__ import division
##External base packages.
import time
import glob
import os
import pdb
import sys
##External packages.
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from numpy_sugar.linalg import economic_qs, economic_svd
from limix.stats import effsizes_se, lrt_pvalues
from glimix_core.lmm import LMM
from bgen_reader import read_bgen
#Internal code.
import qtl_output
import qtl_loader_utils
import qtl_parse_args
import qtl_utilities as utils
from qtl_snp_qc import do_snp_qc
#V0.1.4
def run_PrsQtl_analysis(pheno_filename, anno_filename, prsFile, output_dir, min_call_rate=0.95, blocksize=1000,
skipAutosomeFiltering = False, gaussianize_method=None, minimum_test_samples= 10, seed=np.random.randint(40000), n_perm=0, write_permutations = False, relatedness_score=None, feature_variant_covariate_filename = None, snps_filename=None, feature_filename=None, snp_feature_filename=None, genetic_range='all',
covariates_filename=None, kinship_filename=None, sample_mapping_filename=None, regressCovariatesUpfront = False):
fill_NaN = Imputer(missing_values=np.nan, strategy='mean', axis=0)
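    # Added note: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22; on newer
    # versions the equivalent (assuming column-wise mean imputation is what is intended
    # here) is sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean').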
    print('Running GRS QTL analysis.')
lik = 'normal'
'''Core function to take input and run QTL tests on a given chromosome.'''
if relatedness_score is not None:
relatedness_score = float(relatedness_score)
[phenotype_df, kinship_df, covariate_df, sample2individual_df, annotation_df, snp_filter_df, snp_feature_filter_df, geneticaly_unique_individuals, minimum_test_samples, feature_list, risk_df, chromosome, selectionStart, selectionEnd, feature_variant_covariate_df]=\
utils.run_PrsQtl_analysis_load_intersect_phenotype_covariates_kinship_sample_mapping(pheno_filename=pheno_filename, anno_filename=anno_filename, prsFile=prsFile, skipAutosomeFiltering = skipAutosomeFiltering,
minimum_test_samples= minimum_test_samples, relatedness_score=relatedness_score, snps_filename=snps_filename, feature_filename=feature_filename, snp_feature_filename=snp_feature_filename, selection=genetic_range,
covariates_filename=covariates_filename, kinship_filename=kinship_filename, sample_mapping_filename=sample_mapping_filename, feature_variant_covariate_filename=feature_variant_covariate_filename)
mixed = kinship_df is not None
if (kinship_df is None) or (relatedness_score is None) :
geneticaly_unique_individuals = sample2individual_df['iid'].values
QS = None
    if feature_list is None or len(feature_list) == 0:
print ('No features to be tested.')
sys.exit()
#Open output files
qtl_loader_utils.ensure_dir(output_dir)
    if selectionStart is not None :
output_writer = qtl_output.hdf5_writer(output_dir+'/qtl_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd))
else :
output_writer = qtl_output.hdf5_writer(output_dir+'/qtl_results_{}.h5'.format(chromosome))
if(write_permutations):
        if selectionStart is not None :
permutation_writer = qtl_output.hdf5_permutations_writer(output_dir+'/perm_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd),n_perm)
else :
permutation_writer = qtl_output.hdf5_permutations_writer(output_dir+'/perm_results_{}.h5'.format(chromosome),n_perm)
#Arrays to store indices of snps tested and pass and fail QC SNPs for features without missingness.
tested_snp_names = []
fail_qc_features = []
alpha_params = []
beta_params = []
n_samples = []
n_e_samples = []
na_containing_features=0
currentFeatureNumber = 0
snpQcInfoMain = None
for feature_id in feature_list:
snpQcInfo = None
currentFeatureNumber+= 1
        if (len(phenotype_df.loc[feature_id,:]))<minimum_test_samples:
            print("Feature: "+feature_id+" not tested, not enough samples to do QTL test.")
            fail_qc_features.append(feature_id)
            # geneticaly_unique_individuals has not been overwritten for this feature yet
            # (tmp_unique_individuals may not even exist here), so no restore is needed.
            continue
data_written = False
contains_missing_samples = False
snpQuery = risk_df.index.values
snp_cov_df = None
if(feature_variant_covariate_df is not None):
if(feature_id in feature_variant_covariate_df['feature'].values):
covariateSnp = feature_variant_covariate_df['snp_id'].values[feature_variant_covariate_df['feature']==feature_id]
if(any(i in risk_df.index.values for i in covariateSnp)):
snp_cov_df = risk_df.loc[risk_df.index.map(lambda x: x in list(covariateSnp)),:].transpose()
if (len(snpQuery) != 0) and (snp_filter_df is not None):
snpQuery = list(set(snp_filter_df.index).intersection(set(snpQuery)))
if (len(snpQuery) != 0) and (snp_feature_filter_df is not None):
snpQuery = list(set(np.unique(snp_feature_filter_df['snp_id'].loc[snp_feature_filter_df['feature']==feature_id])).intersection(set(snpQuery)))
if len(snpQuery) == 0:
print("Feature: "+feature_id+" not tested. No SNPS passed QC for phenotype.")
fail_qc_features.append(feature_id)
continue
else:
phenotype_ds = phenotype_df.loc[feature_id]
contains_missing_samples = any(~np.isfinite(phenotype_ds))
if(contains_missing_samples):
#import pdb; pdb.set_trace()
print ('Feature: ' + feature_id + ' contains missing data.')
phenotype_ds.dropna(inplace=True)
na_containing_features = na_containing_features+1
'''select indices for relevant individuals in genotype matrix
These are not unique. NOT to be used to access phenotype/covariates data
'''
individual_ids = sample2individual_df.loc[phenotype_ds.index,'iid'].values
sample2individual_feature= sample2individual_df.loc[phenotype_ds.index]
if contains_missing_samples:
tmp_unique_individuals = geneticaly_unique_individuals
if (kinship_df is not None) and (relatedness_score is not None):
geneticaly_unique_individuals = utils.get_unique_genetic_samples(kinship_df.loc[individual_ids,individual_ids], relatedness_score);
else :
geneticaly_unique_individuals = individual_ids
if phenotype_ds.empty or len(geneticaly_unique_individuals)<minimum_test_samples :
print("Feature: "+feature_id+" not tested not enough samples do QTL test.")
fail_qc_features.append(feature_id)
if contains_missing_samples:
geneticaly_unique_individuals = tmp_unique_individuals
continue
elif np.var(phenotype_ds.values) == 0:
print("Feature: "+feature_id+" has no variance in selected individuals.")
fail_qc_features.append(feature_id)
if contains_missing_samples:
geneticaly_unique_individuals = tmp_unique_individuals
continue
print ('For feature: ' +str(currentFeatureNumber)+ '/'+str(len(feature_list))+ ' (' + feature_id + '): ' + str(len(snpQuery)) + ' risk scores will be tested.\n Please stand by.')
if(n_perm!=0):
bestPermutationPval = np.ones((n_perm), dtype=np.float)
#Here we need to start preparing the LMM, can use the fam for sample IDS in SNP matrix.
# test if the covariates, kinship, snp and phenotype are in the same order
if ((all(kinship_df.loc[individual_ids,individual_ids].index==sample2individual_feature.loc[phenotype_ds.index]['iid']) if kinship_df is not None else True) &\
(all(phenotype_ds.index==covariate_df.loc[sample2individual_feature['sample'],:].index)if covariate_df is not None else True)):
'''
if all lines are in order put in arrays the correct genotype and phenotype
x=a if cond1 else b <---> equivalent to if cond1: x=a else x=b; better readability of the code
'''
if kinship_df is not None:
kinship_mat = kinship_df.loc[individual_ids,individual_ids].values
kinship_mat = kinship_mat.astype(float)
##GOWER normalization of Kinship matrix.
kinship_mat *= (kinship_mat.shape[0] - 1) / (kinship_mat.trace() - kinship_mat.mean(0).sum())
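                # Gower scaling: after centering, the trace of the kinship matrix equals n - 1, so the
                # genetic component contributes an average variance of ~1 per sample and the estimated
                # variance components stay on a comparable scale across cohorts.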
## This needs to go with the subselection stuff.
if(QS is None and not contains_missing_samples):
QS = economic_qs(kinship_mat)
elif (contains_missing_samples):
QS_tmp = QS
QS = economic_qs(kinship_mat)
if kinship_df is None:
K = np.eye(len(phenotype_ds.index))
if(QS is None and not contains_missing_samples):
QS = economic_qs(K)
elif (contains_missing_samples):
QS_tmp = QS
QS = economic_qs(K)
cov_matrix = covariate_df.loc[sample2individual_feature['sample'],:].values if covariate_df is not None else None
if covariate_df is None:
cov_matrix = np.ones((len(individual_ids), 1))
#pdb.set_trace()
if snp_cov_df is not None:
snp_cov_df_tmp = snp_cov_df.loc[individual_ids,:]
snp_cov_df = pd.DataFrame(fill_NaN.fit_transform(snp_cov_df_tmp))
snp_cov_df.index=sample2individual_feature['sample']
snp_cov_df.columns=snp_cov_df_tmp.columns
cov_matrix = np.concatenate((cov_matrix,snp_cov_df.values),1)
snp_cov_df_tmp = None
snp_cov_df = None
cov_matrix = cov_matrix.astype(float)
else:
print ('There is an issue in mapping phenotypes vs covariates and/or kinship')
sys.exit()
phenotype = utils.force_normal_distribution(phenotype_ds.values,method=gaussianize_method) if gaussianize_method is not None else phenotype_ds.values
#Prepare LMM
phenotype = phenotype.astype(float)
##Mixed and test.
##This is a future change so we don't need to decompose the COVs every time.
##Like QS this needs to happen when genetic unique individuals is the same.
#svd_cov = economic_svd(cov_matrix)
#lmm = LMM(phenotype, cov_matrix, QS, SVD=svd_cov)
#These steps need to happen only once per phenotype.
#print(QS)
lmm = LMM(phenotype, cov_matrix, QS)
if not mixed:
lmm.delta = 1
lmm.fix('delta')
#Prepare null model.
lmm.fit(verbose=False)
if regressCovariatesUpfront:
phenotype_corrected = phenotype-cov_matrix[:,1:].dot(lmm.beta[1:])
cov_matrix_corrected = cov_matrix[:,0]
lmm = LMM(phenotype_corrected, cov_matrix_corrected, QS)
lmm.fit(verbose=False)
null_lml = lmm.lml()
flmm = lmm.get_fast_scanner()
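            # The fast scanner reuses the null-model decomposition, so each candidate risk score in the
            # loop below is tested with a cheap update rather than a full LMM refit.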
#pdb.set_trace();
for snpGroup in utils.chunker(snpQuery, blocksize):
#Fix seed at the start of the first chunker so all permutations are based on the same random first split.
np.random.seed(seed)
snp_names = snpGroup
tested_snp_names.extend(snp_names)
snp_matrix_DF = risk_df.loc[snp_names,individual_ids].transpose()
            ##GRS var QC: drop risk scores that are entirely missing or have zero variance across samples.
            snp_matrix_DF = snp_matrix_DF.loc[:, snp_matrix_DF.isna().sum(axis=0) != snp_matrix_DF.shape[0]]
            snp_matrix_DF = snp_matrix_DF.loc[:, (np.nanstd(snp_matrix_DF, axis=0) > 0)]
# test if the covariates, kinship, snp and phenotype are in the same order
if (len(snp_matrix_DF.index) != len(sample2individual_feature.loc[phenotype_ds.index]['iid']) or not all(snp_matrix_DF.index==sample2individual_feature.loc[phenotype_ds.index]['iid'])):
print ('There is an issue in mapping phenotypes and genotypes')
sys.exit()
#Impute missingness
#pdb.set_trace()
call_rate = 1-snp_matrix_DF.isnull().sum()/len(snp_matrix_DF.index)
if snpQcInfo is None and call_rate is not None:
snpQcInfo = call_rate
elif call_rate is not None:
snpQcInfo = | pd.concat([snpQcInfo, call_rate], axis=0) | pandas.concat |
import torch
import random as rd
from os.path import join
from os.path import isfile
import numpy as np
import pandas as pd
import itertools
from collections import Counter
import tqdm
import pickle
from .dataset import Dataset
from torch_geometric.data import download_url, extract_zip
from ..parser import parse_ml25m, parse_mlsmall
def save_df(df, path):
df.to_csv(path, sep=';', index=False)
def reindex_df_mlsmall(movies, ratings, tagging):
"""
Args:
movies:
ratings:
tagging:
genome_tagging:
genome_tags:
Returns:
"""
# Reindex uid
unique_uids = np.sort(ratings.uid.unique()).astype(np.int)
uids = np.arange(unique_uids.shape[0]).astype(np.int)
raw_uid2uid = {raw_uid: uid for raw_uid, uid in zip(unique_uids, uids)}
ratings['uid'] = np.array([raw_uid2uid[raw_uid] for raw_uid in ratings.uid], dtype=np.int)
tagging['uid'] = np.array([raw_uid2uid[raw_uid] for raw_uid in tagging.uid], dtype=np.int)
# Reindex iid
unique_iids = np.sort(movies.iid.unique()).astype(np.int)
iids = np.arange(unique_iids.shape[0]).astype(np.int)
raw_iid2iid = {raw_iid: iid for raw_iid, iid in zip(unique_iids, iids)}
movies['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in movies.iid], dtype=np.int)
ratings['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in ratings.iid], dtype=np.int)
tagging['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in tagging.iid], dtype=np.int)
# Create tid
unique_tags = np.sort(tagging.tag.unique()).astype(np.str)
tids = np.arange(unique_tags.shape[0]).astype(np.int)
tags = pd.DataFrame({'tid': tids, 'tag': unique_tags})
tag2tid = {tag: tid for tag, tid in zip(unique_tags, tids)}
tagging['tid'] = np.array([tag2tid[tag] for tag in tagging.tag], dtype=np.int)
tagging = tagging.drop(columns=['tag'])
return movies, ratings, tagging, tags
def reindex_df_ml25m(movies, ratings, tagging, genome_tagging, genome_tags):
"""
Args:
movies:
ratings:
tagging:
genome_tagging:
genome_tags:
Returns:
"""
# Reindex uid
unique_uids = np.sort(ratings.uid.unique()).astype(np.int)
uids = np.arange(unique_uids.shape[0]).astype(np.int)
raw_uid2uid = {raw_uid: uid for raw_uid, uid in zip(unique_uids, uids)}
ratings['uid'] = np.array([raw_uid2uid[raw_uid] for raw_uid in ratings.uid], dtype=np.int)
tagging['uid'] = np.array([raw_uid2uid[raw_uid] for raw_uid in tagging.uid], dtype=np.int)
# Reindex iid
unique_iids = np.sort(movies.iid.unique()).astype(np.int)
iids = np.arange(unique_iids.shape[0]).astype(np.int)
raw_iid2iid = {raw_iid: iid for raw_iid, iid in zip(unique_iids, iids)}
movies['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in movies.iid], dtype=np.int)
ratings['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in ratings.iid], dtype=np.int)
tagging['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in tagging.iid], dtype=np.int)
genome_tagging['iid'] = np.array([raw_iid2iid[raw_iid] for raw_iid in genome_tagging.iid], dtype=np.int)
# Create tid
unique_tags = np.sort(tagging.tag.unique()).astype(np.str)
tids = np.arange(unique_tags.shape[0]).astype(np.int)
tags = pd.DataFrame({'tid': tids, 'tag': unique_tags})
tag2tid = {tag: tid for tag, tid in zip(unique_tags, tids)}
tagging['tid'] = np.array([tag2tid[tag] for tag in tagging.tag], dtype=np.int)
tagging = tagging.drop(columns=['tag'])
# Reindex genome_tid
unique_genome_tids = np.sort(genome_tags.genome_tid.unique()).astype(np.int)
genome_tids = np.arange(unique_genome_tids.shape[0]).astype(np.int)
raw_genome_tid2genome_tid = {raw_genome_tid: genome_tid for raw_genome_tid, genome_tid in zip(unique_genome_tids, genome_tids)}
genome_tags['genome_tid'] = np.array(
[raw_genome_tid2genome_tid[raw_genome_tid] for raw_genome_tid in genome_tags.genome_tid], dtype=np.int)
genome_tagging['genome_tid'] = np.array(
[raw_genome_tid2genome_tid[raw_genome_tid] for raw_genome_tid in genome_tagging.genome_tid])
return movies, ratings, tagging, tags, genome_tagging, genome_tags
def drop_infrequent_concept_from_str(df, concept_name, num_occs):
concept_strs = [concept_str for concept_str in df[concept_name]]
duplicated_concept = [concept_str.split(',') for concept_str in concept_strs]
duplicated_concept = list(itertools.chain.from_iterable(duplicated_concept))
writer_counter_dict = Counter(duplicated_concept)
del writer_counter_dict['']
del writer_counter_dict['N/A']
unique_concept = [k for k, v in writer_counter_dict.items() if v >= num_occs]
concept_strs = [
','.join([concept for concept in concept_str.split(',') if concept in unique_concept])
for concept_str in concept_strs
]
df[concept_name] = concept_strs
return df
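# Illustrative example (hypothetical values): with num_occs=2 and concept_name='directors', rows
#   'nolan,smith' | 'nolan' | 'smith' | 'lee'
# keep 'nolan' and 'smith' (each appears at least twice) while 'lee' (one occurrence) is blanked,
# leaving 'nolan,smith' | 'nolan' | 'smith' | ''.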
def generate_mlsmall_hete_graph(
movies, ratings, tagging
):
def get_concept_num_from_str(df, concept_name):
concept_strs = [concept_str.split(',') for concept_str in df[concept_name]]
concepts = set(itertools.chain.from_iterable(concept_strs))
concepts.remove('')
num_concepts = len(concepts)
return list(concepts), num_concepts
######################### Define entities #########################
unique_uids = list(np.sort(ratings.uid.unique()))
num_uids = len(unique_uids)
unique_iids = list(np.sort(ratings.iid.unique()))
num_iids = len(unique_iids)
unique_genres = list(movies.keys()[3:22])
num_genres = len(unique_genres)
unique_years = list(movies.year.unique())
num_years = len(unique_years)
unique_directors, num_directors = get_concept_num_from_str(movies, 'directors')
unique_actors, num_actors = get_concept_num_from_str(movies, 'actors')
unique_writers, num_writers = get_concept_num_from_str(movies, 'writers')
unique_tids = list(np.sort(tagging.tid.unique()))
num_tids = len(unique_tids)
dataset_property_dict = {}
dataset_property_dict['unique_uids'] = unique_uids
dataset_property_dict['num_uids'] = num_uids
dataset_property_dict['unique_iids'] = unique_iids
dataset_property_dict['num_iids'] = num_iids
dataset_property_dict['unique_genres'] = unique_genres
dataset_property_dict['num_genres'] = num_genres
dataset_property_dict['unique_years'] = unique_years
dataset_property_dict['num_years'] = num_years
dataset_property_dict['unique_directors'] = unique_directors
dataset_property_dict['num_directors'] = num_directors
dataset_property_dict['unique_actors'] = unique_actors
dataset_property_dict['num_actors'] = num_actors
dataset_property_dict['unique_writers'] = unique_writers
dataset_property_dict['num_writers'] = num_writers
dataset_property_dict['unique_tids'] = unique_tids
dataset_property_dict['num_tids'] = num_tids
######################### Define number of entities #########################
num_nodes = num_uids + num_iids + num_genres + num_years + num_directors + num_actors + num_writers + \
num_tids
num_node_types = 8
dataset_property_dict['num_nodes'] = num_nodes
dataset_property_dict['num_node_types'] = num_node_types
types = ['uid', 'iid', 'genre', 'year', 'director', 'actor', 'writer', 'tid']
num_nodes_dict = {'uid': num_uids, 'iid': num_iids, 'genre': num_genres, 'year': num_years, 'director': num_directors,
'actor': num_actors, 'writer': num_writers, 'tid': num_tids}
######################### Define entities to node id map #########################
type_accs = {}
nid2e_dict = {}
acc = 0
type_accs['uid'] = acc
uid2nid = {uid: i + acc for i, uid in enumerate(unique_uids)}
for i, uid in enumerate(unique_uids):
nid2e_dict[i + acc] = ('uid', uid)
acc += num_uids
type_accs['iid'] = acc
iid2nid = {iid: i + acc for i, iid in enumerate(unique_iids)}
for i, iid in enumerate(unique_iids):
nid2e_dict[i + acc] = ('iid', iid)
acc += num_iids
type_accs['genre'] = acc
genre2nid = {genre: i + acc for i, genre in enumerate(unique_genres)}
for i, genre in enumerate(unique_genres):
nid2e_dict[i + acc] = ('genre', genre)
acc += num_genres
type_accs['year'] = acc
year2nid = {year: i + acc for i, year in enumerate(unique_years)}
for i, year in enumerate(unique_years):
nid2e_dict[i + acc] = ('year', year)
acc += num_years
type_accs['director'] = acc
director2nid = {director: i + acc for i, director in enumerate(unique_directors)}
for i, director in enumerate(unique_directors):
nid2e_dict[i + acc] = ('director', director)
acc += num_directors
type_accs['actor'] = acc
actor2nid = {actor: i + acc for i, actor in enumerate(unique_actors)}
for i, actor in enumerate(unique_actors):
nid2e_dict[i + acc] = ('actor', actor)
acc += num_actors
type_accs['writer'] = acc
writer2nid = {writer: i + acc for i, writer in enumerate(unique_writers)}
for i, writer in enumerate(unique_writers):
nid2e_dict[i + acc] = ('writer', writer)
acc += num_writers
type_accs['tid'] = acc
tag2nid = {tid: i + acc for i, tid in enumerate(unique_tids)}
for i, tid in enumerate(unique_tids):
nid2e_dict[i + acc] = ('tid', tid)
e2nid_dict = {'uid': uid2nid, 'iid': iid2nid, 'genre': genre2nid, 'year': year2nid, 'director': director2nid,
'actor': actor2nid, 'writer': writer2nid, 'tid': tag2nid}
dataset_property_dict['e2nid_dict'] = e2nid_dict
dataset_property_dict['nid2e_dict'] = nid2e_dict
######################### create graphs #########################
edge_index_nps = {}
print('Creating item attribute edges...')
inids = [e2nid_dict['iid'][iid] for iid in movies.iid]
year_nids = [e2nid_dict['year'][year] for year in movies.year]
year2item_edge_index_np = np.vstack((np.array(year_nids), np.array(inids)))
genre_nids = []
inids = []
for genre in unique_genres:
iids = movies[movies[genre]].iid
inids += [e2nid_dict['iid'][iid] for iid in iids]
genre_nids += [e2nid_dict['genre'][genre] for _ in range(iids.shape[0])]
genre2item_edge_index_np = np.vstack((np.array(genre_nids), np.array(inids)))
inids = [e2nid_dict['iid'][iid] for iid in movies.iid]
directors_list = [
[director for director in directors.split(',') if director != '']
for directors in movies.directors
]
directors_nids = [[e2nid_dict['director'][director] for director in directors] for directors in directors_list]
directors_nids = list(itertools.chain.from_iterable(directors_nids))
d_inids = [[i_nid for _ in range(len(directors_list[idx]))] for idx, i_nid in enumerate(inids)]
d_inids = list(itertools.chain.from_iterable(d_inids))
director2item_edge_index_np = np.vstack((np.array(directors_nids), np.array(d_inids)))
actors_list = [
[actor for actor in actors.split(',') if actor != '']
for actors in movies.actors
]
actor_nids = [[e2nid_dict['actor'][actor] for actor in actors] for actors in actors_list]
actor_nids = list(itertools.chain.from_iterable(actor_nids))
a_inids = [[i_nid for _ in range(len(actors_list[idx]))] for idx, i_nid in enumerate(inids)]
a_inids = list(itertools.chain.from_iterable(a_inids))
actor2item_edge_index_np = np.vstack((np.array(actor_nids), np.array(a_inids)))
writers_list = [
[writer for writer in writers.split(',') if writer != '']
for writers in movies.writers
]
writer_nids = [[e2nid_dict['writer'][writer] for writer in writers] for writers in writers_list]
writer_nids = list(itertools.chain.from_iterable(writer_nids))
w_inids = [[i_nid for _ in range(len(writers_list[idx]))] for idx, i_nid in enumerate(inids)]
w_inids = list(itertools.chain.from_iterable(w_inids))
writer2item_edge_index_np = np.vstack((np.array(writer_nids), np.array(w_inids)))
edge_index_nps['year2item'] = year2item_edge_index_np
edge_index_nps['genre2item'] = genre2item_edge_index_np
edge_index_nps['director2item'] = director2item_edge_index_np
edge_index_nps['actor2item'] = actor2item_edge_index_np
edge_index_nps['writer2item'] = writer2item_edge_index_np
unids = [e2nid_dict['uid'][uid] for uid in tagging.uid]
tnids = [e2nid_dict['tid'][tid] for tid in tagging.tid]
inids = [e2nid_dict['iid'][iid] for iid in tagging.iid]
tag2user_edge_index_np = np.vstack((np.array(tnids), np.array(unids)))
tag2item_edge_index_np = np.vstack((np.array(tnids), np.array(inids)))
edge_index_nps['tag2user'] = tag2user_edge_index_np
edge_index_nps['tag2item'] = tag2item_edge_index_np
print('Creating rating property edges...')
test_pos_unid_inid_map, neg_unid_inid_map = {}, {}
rating_np = np.zeros((0,))
user2item_edge_index_np = np.zeros((2, 0))
sorted_ratings = ratings.sort_values('uid')
pbar = tqdm.tqdm(unique_uids, total=len(unique_uids))
for uid in pbar:
pbar.set_description('Creating the edges for the user {}'.format(uid))
uid_ratings = sorted_ratings[sorted_ratings.uid == uid].sort_values('timestamp')
uid_iids = uid_ratings.iid.to_numpy()
uid_ratings = uid_ratings.rating.to_numpy()
unid = e2nid_dict['uid'][uid]
train_pos_uid_iids = list(uid_iids[:-1]) # Use leave one out setup
train_pos_uid_ratings = uid_ratings[:-1]
train_pos_uid_inids = [e2nid_dict['iid'][iid] for iid in train_pos_uid_iids]
test_pos_uid_iids = list(uid_iids[-1:])
test_pos_uid_inids = [e2nid_dict['iid'][iid] for iid in test_pos_uid_iids]
neg_uid_iids = list(set(unique_iids) - set(uid_iids))
neg_uid_inids = [e2nid_dict['iid'][iid] for iid in neg_uid_iids]
test_pos_unid_inid_map[unid] = test_pos_uid_inids
neg_unid_inid_map[unid] = neg_uid_inids
unid_user2item_edge_index_np = np.array(
[[unid for _ in range(len(train_pos_uid_inids))], train_pos_uid_inids]
)
user2item_edge_index_np = np.hstack([user2item_edge_index_np, unid_user2item_edge_index_np])
rating_np = np.concatenate([rating_np, train_pos_uid_ratings])
dataset_property_dict['rating_np'] = rating_np
edge_index_nps['user2item'] = user2item_edge_index_np
dataset_property_dict['edge_index_nps'] = edge_index_nps
dataset_property_dict['test_pos_unid_inid_map'], dataset_property_dict['neg_unid_inid_map'] = \
test_pos_unid_inid_map, neg_unid_inid_map
print('Building edge type map...')
edge_type_dict = {edge_type: edge_type_idx for edge_type_idx, edge_type in enumerate(list(edge_index_nps.keys()))}
dataset_property_dict['edge_type_dict'] = edge_type_dict
dataset_property_dict['num_edge_types'] = len(list(edge_index_nps.keys()))
print('Building the item occurrence map...')
item_count = ratings['iid'].value_counts()
item_nid_occs = {}
for iid in unique_iids:
item_nid_occs[e2nid_dict['iid'][iid]] = item_count[iid]
dataset_property_dict['item_nid_occs'] = item_nid_occs
# New functionality for pytorch geometric like dataset
dataset_property_dict['types'] = types
dataset_property_dict['num_nodes_dict'] = num_nodes_dict
dataset_property_dict['type_accs'] = type_accs
return dataset_property_dict
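# Rough usage sketch (variable names assumed from this module):
#   movies, ratings, tagging, tags = reindex_df_mlsmall(movies, ratings, tagging)
#   props = generate_mlsmall_hete_graph(movies, ratings, tagging)
#   props['edge_index_nps']['user2item']  # 2 x n_train_interactions array of (user nid, item nid)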
def generate_ml25m_hete_graph(
movies, ratings, tagging, genome_tagging
):
def get_concept_num_from_str(df, concept_name):
concept_strs = [concept_str.split(',') for concept_str in df[concept_name]]
concepts = set(itertools.chain.from_iterable(concept_strs))
concepts.remove('')
num_concepts = len(concepts)
return list(concepts), num_concepts
######################### Define entities #########################
unique_uids = list(np.sort(ratings.uid.unique()))
num_uids = len(unique_uids)
unique_iids = list(np.sort(ratings.iid.unique()))
num_iids = len(unique_iids)
unique_genres = list(movies.keys()[3:23])
num_genres = len(unique_genres)
unique_years = list(movies.year.unique())
num_years = len(unique_years)
unique_directors, num_directors = get_concept_num_from_str(movies, 'directors')
unique_actors, num_actors = get_concept_num_from_str(movies, 'actors')
unique_writers, num_writers = get_concept_num_from_str(movies, 'writers')
unique_tids = list(np.sort(tagging.tid.unique()))
num_tids = len(unique_tids)
unique_genome_tids = list(np.sort(genome_tagging.genome_tid.unique()))
num_genome_tids = len(unique_genome_tids)
dataset_property_dict = {}
dataset_property_dict['unique_uids'] = unique_uids
dataset_property_dict['num_uids'] = num_uids
dataset_property_dict['unique_iids'] = unique_iids
dataset_property_dict['num_iids'] = num_iids
dataset_property_dict['unique_genres'] = unique_genres
dataset_property_dict['num_genres'] = num_genres
dataset_property_dict['unique_years'] = unique_years
dataset_property_dict['num_years'] = num_years
dataset_property_dict['unique_directors'] = unique_directors
dataset_property_dict['num_directors'] = num_directors
dataset_property_dict['unique_actors'] = unique_actors
dataset_property_dict['num_actors'] = num_actors
dataset_property_dict['unique_writers'] = unique_writers
dataset_property_dict['num_writers'] = num_writers
dataset_property_dict['unique_tids'] = unique_tids
dataset_property_dict['num_tids'] = num_tids
dataset_property_dict['unique_genome_tids'] = unique_genome_tids
dataset_property_dict['num_genome_tids'] = num_genome_tids
######################### Define number of entities #########################
num_nodes = num_uids + num_iids + num_genres + num_years + num_directors + num_actors + num_writers + \
num_tids + num_genome_tids
num_node_types = 9
dataset_property_dict['num_nodes'] = num_nodes
dataset_property_dict['num_node_types'] = num_node_types
types = ['uid', 'iid', 'genre', 'year', 'director', 'actor', 'writer', 'tid', 'genome_tid']
num_nodes_dict = {'uid': num_uids, 'iid': num_iids, 'genre': num_genres, 'year': num_years, 'director': num_directors,
'actor': num_actors, 'writer': num_writers, 'tid': num_tids, 'genome_tid': num_genome_tids}
######################### Define entities to node id map #########################
type_accs = {}
nid2e_dict = {}
acc = 0
type_accs['uid'] = acc
uid2nid = {uid: i + acc for i, uid in enumerate(unique_uids)}
for i, uid in enumerate(unique_uids):
nid2e_dict[i + acc] = ('uid', uid)
acc += num_uids
type_accs['iid'] = acc
iid2nid = {iid: i + acc for i, iid in enumerate(unique_iids)}
for i, iid in enumerate(unique_iids):
nid2e_dict[i + acc] = ('iid', iid)
acc += num_iids
type_accs['genre'] = acc
genre2nid = {genre: i + acc for i, genre in enumerate(unique_genres)}
for i, genre in enumerate(unique_genres):
nid2e_dict[i + acc] = ('genre', genre)
acc += num_genres
type_accs['year'] = acc
year2nid = {year: i + acc for i, year in enumerate(unique_years)}
for i, year in enumerate(unique_years):
nid2e_dict[i + acc] = ('year', year)
acc += num_years
type_accs['director'] = acc
director2nid = {director: i + acc for i, director in enumerate(unique_directors)}
for i, director in enumerate(unique_directors):
nid2e_dict[i + acc] = ('director', director)
acc += num_directors
type_accs['actor'] = acc
actor2nid = {actor: i + acc for i, actor in enumerate(unique_actors)}
for i, actor in enumerate(unique_actors):
nid2e_dict[i + acc] = ('actor', actor)
acc += num_actors
type_accs['writer'] = acc
writer2nid = {writer: i + acc for i, writer in enumerate(unique_writers)}
for i, writer in enumerate(unique_writers):
nid2e_dict[i + acc] = ('writer', writer)
acc += num_writers
type_accs['tid'] = acc
tag2nid = {tid: i + acc for i, tid in enumerate(unique_tids)}
for i, tag in enumerate(unique_tids):
nid2e_dict[i + acc] = ('tid', tag)
acc += num_tids
type_accs['genome_tid'] = acc
genome_tag2nid = {genome_tid: i + acc for i, genome_tid in enumerate(unique_genome_tids)}
for i, genome_tag in enumerate(unique_genome_tids):
nid2e_dict[i + acc] = ('genome_tid', genome_tag)
e2nid_dict = {'uid': uid2nid, 'iid': iid2nid, 'genre': genre2nid, 'year': year2nid, 'director': director2nid,
'actor': actor2nid, 'writer': writer2nid, 'tid': tag2nid, 'genome_tid': genome_tag2nid}
dataset_property_dict['e2nid_dict'] = e2nid_dict
dataset_property_dict['nid2e_dict'] = nid2e_dict
######################### create graphs #########################
edge_index_nps = {}
print('Creating item attribute edges...')
inids = [e2nid_dict['iid'][iid] for iid in movies.iid]
year_nids = [e2nid_dict['year'][year] for year in movies.year]
year2item_edge_index_np = np.vstack((np.array(year_nids), np.array(inids)))
genre_nids = []
inids = []
for genre in unique_genres:
iids = movies[movies[genre]].iid
inids += [e2nid_dict['iid'][iid] for iid in iids]
genre_nids += [e2nid_dict['genre'][genre] for _ in range(iids.shape[0])]
genre2item_edge_index_np = np.vstack((np.array(genre_nids), np.array(inids)))
inids = [e2nid_dict['iid'][iid] for iid in movies.iid]
directors_list = [
[director for director in directors.split(',') if director != '']
for directors in movies.directors
]
directors_nids = [[e2nid_dict['director'][director] for director in directors] for directors in directors_list]
directors_nids = list(itertools.chain.from_iterable(directors_nids))
d_inids = [[i_nid for _ in range(len(directors_list[idx]))] for idx, i_nid in enumerate(inids)]
d_inids = list(itertools.chain.from_iterable(d_inids))
director2item_edge_index_np = np.vstack((np.array(directors_nids), np.array(d_inids)))
actors_list = [
[actor for actor in actors.split(',') if actor != '']
for actors in movies.actors
]
actor_nids = [[e2nid_dict['actor'][actor] for actor in actors] for actors in actors_list]
actor_nids = list(itertools.chain.from_iterable(actor_nids))
a_inids = [[i_nid for _ in range(len(actors_list[idx]))] for idx, i_nid in enumerate(inids)]
a_inids = list(itertools.chain.from_iterable(a_inids))
actor2item_edge_index_np = np.vstack((np.array(actor_nids), np.array(a_inids)))
writers_list = [
[writer for writer in writers.split(',') if writer != '']
for writers in movies.writers
]
writer_nids = [[e2nid_dict['writer'][writer] for writer in writers] for writers in writers_list]
writer_nids = list(itertools.chain.from_iterable(writer_nids))
w_inids = [[i_nid for _ in range(len(writers_list[idx]))] for idx, i_nid in enumerate(inids)]
w_inids = list(itertools.chain.from_iterable(w_inids))
writer2item_edge_index_np = np.vstack((np.array(writer_nids), np.array(w_inids)))
edge_index_nps['year2item'] = year2item_edge_index_np
edge_index_nps['genre2item'] = genre2item_edge_index_np
edge_index_nps['director2item'] = director2item_edge_index_np
edge_index_nps['actor2item'] = actor2item_edge_index_np
edge_index_nps['writer2item'] = writer2item_edge_index_np
inids = [e2nid_dict['iid'][iid] for iid in genome_tagging.iid]
genome_tnids = [e2nid_dict['genome_tid'][genome_tid] for genome_tid in genome_tagging.genome_tid]
genome_tag2item_edge_index_np = np.vstack((np.array(genome_tnids), np.array(inids)))
edge_index_nps['genome_tag2item'] = genome_tag2item_edge_index_np
unids = [e2nid_dict['uid'][uid] for uid in tagging.uid]
tnids = [e2nid_dict['tid'][tid] for tid in tagging.tid]
inids = [e2nid_dict['iid'][iid] for iid in tagging.iid]
tag2user_edge_index_np = np.vstack((np.array(tnids), np.array(unids)))
tag2item_edge_index_np = np.vstack((np.array(tnids), np.array(inids)))
edge_index_nps['tag2user'] = tag2user_edge_index_np
edge_index_nps['tag2item'] = tag2item_edge_index_np
print('Creating rating property edges...')
test_pos_unid_inid_map, neg_unid_inid_map = {}, {}
rating_np = np.zeros((0,))
user2item_edge_index_np = np.zeros((2, 0))
sorted_ratings = ratings.sort_values('uid')
pbar = tqdm.tqdm(unique_uids, total=len(unique_uids))
for uid in pbar:
pbar.set_description('Creating the edges for the user {}'.format(uid))
uid_ratings = sorted_ratings[sorted_ratings.uid == uid].sort_values('timestamp')
uid_iids = uid_ratings.iid.to_numpy()
uid_ratings = uid_ratings.rating.to_numpy()
unid = e2nid_dict['uid'][uid]
train_pos_uid_iids = list(uid_iids[:-1]) # Use leave one out setup
train_pos_uid_ratings = uid_ratings[:-1]
train_pos_uid_inids = [e2nid_dict['iid'][iid] for iid in train_pos_uid_iids]
test_pos_uid_iids = list(uid_iids[-1:])
test_pos_uid_inids = [e2nid_dict['iid'][iid] for iid in test_pos_uid_iids]
neg_uid_iids = list(set(unique_iids) - set(uid_iids))
neg_uid_inids = [e2nid_dict['iid'][iid] for iid in neg_uid_iids]
test_pos_unid_inid_map[unid] = test_pos_uid_inids
neg_unid_inid_map[unid] = neg_uid_inids
unid_user2item_edge_index_np = np.array(
[[unid for _ in range(len(train_pos_uid_inids))], train_pos_uid_inids]
)
user2item_edge_index_np = np.hstack([user2item_edge_index_np, unid_user2item_edge_index_np])
rating_np = np.concatenate([rating_np, train_pos_uid_ratings])
dataset_property_dict['rating_np'] = rating_np
edge_index_nps['user2item'] = user2item_edge_index_np
dataset_property_dict['edge_index_nps'] = edge_index_nps
dataset_property_dict['test_pos_unid_inid_map'], dataset_property_dict['neg_unid_inid_map'] = \
test_pos_unid_inid_map, neg_unid_inid_map
print('Building edge type map...')
edge_type_dict = {edge_type: edge_type_idx for edge_type_idx, edge_type in enumerate(list(edge_index_nps.keys()))}
dataset_property_dict['edge_type_dict'] = edge_type_dict
dataset_property_dict['num_edge_types'] = len(list(edge_index_nps.keys()))
print('Building the item occurrence map...')
item_count = ratings['iid'].value_counts()
item_nid_occs = {}
for iid in unique_iids:
item_nid_occs[e2nid_dict['iid'][iid]] = item_count[iid]
dataset_property_dict['item_nid_occs'] = item_nid_occs
# New functionality for pytorch geometric like dataset
dataset_property_dict['types'] = types
dataset_property_dict['num_nodes_dict'] = num_nodes_dict
dataset_property_dict['type_accs'] = type_accs
return dataset_property_dict
class MovieLens(Dataset):
url = 'http://files.grouplens.org/datasets/movielens/'
def __init__(self,
root,
name,
transform=None,
pre_transform=None,
pre_filter=None,
**kwargs):
self.name = name.lower()
self.type = kwargs['type']
assert self.name in ['25m', 'latest-small']
assert self.type in ['hete']
self.num_core = kwargs['num_core']
self.num_feat_core = kwargs['num_feat_core']
self.entity_aware = kwargs['entity_aware']
self.num_negative_samples = kwargs['num_negative_samples']
self.sampling_strategy = kwargs['sampling_strategy']
self.cf_loss_type = kwargs['cf_loss_type']
super(MovieLens, self).__init__(root, transform, pre_transform, pre_filter)
with open(self.processed_paths[0], 'rb') as f: # Read the class property
dataset_property_dict = pickle.load(f)
for k, v in dataset_property_dict.items():
self[k] = v
print('Dataset loaded!')
@property
def raw_file_names(self):
return 'ml-{}.zip'.format(self.name.lower())
@property
def processed_file_names(self):
return ['ml_{}_{}.pkl'.format(self.name, self.build_suffix())]
def download(self):
path = download_url(self.url + self.raw_file_names, self.raw_dir)
extract_zip(path, self.raw_dir)
def process(self):
if self.name == '25m':
try:
movies = pd.read_csv(join(self.processed_dir, 'movies.csv'), sep=';').fillna('')
ratings = pd.read_csv(join(self.processed_dir, 'ratings.csv'), sep=';')
tagging = pd.read_csv(join(self.processed_dir, 'tagging.csv'), sep=';')
genome_tagging = pd.read_csv(join(self.processed_dir, 'genome_tagging.csv'), sep=';')
print('Read data frame from {}!'.format(self.processed_dir))
            except Exception:
unzip_raw_dir = join(self.raw_dir, 'ml-{}'.format(self.name))
print('Data frame not found in {}! Read from raw data and preprocessing from {}!'.format(
self.processed_dir, unzip_raw_dir))
raw_movies_path = join(self.raw_dir, 'raw_movies.csv')
raw_ratings_path = join(self.raw_dir, 'raw_ratings.csv')
raw_tagging_path = join(self.raw_dir, 'raw_tagging.csv')
raw_genome_scores_path = join(self.raw_dir, 'raw_genome_scores.csv')
raw_genome_tags_path = join(self.raw_dir, 'raw_genome_tags.csv')
if not (isfile(raw_movies_path) and isfile(raw_ratings_path) and isfile(raw_tagging_path) and \
isfile(raw_genome_scores_path) and isfile(raw_genome_tags_path)):
print('Raw files not found! Reading directories and actors from api!')
movies, ratings, tagging, genome_scores, genome_tags = parse_ml25m(unzip_raw_dir)
save_df(movies, raw_movies_path)
save_df(ratings, raw_ratings_path)
save_df(tagging, raw_tagging_path)
save_df(genome_scores, raw_genome_scores_path)
save_df(genome_tags, raw_genome_tags_path)
else:
print('Raw files loaded!')
movies = pd.read_csv(raw_movies_path, sep=';').fillna('')
ratings = pd.read_csv(raw_ratings_path, sep=';')
tagging = pd.read_csv(raw_tagging_path, sep=';')
genome_scores = pd.read_csv(raw_genome_scores_path, sep=';')
genome_tags = pd.read_csv(raw_genome_tags_path, sep=';')
# Remove duplicates
movies = movies.drop_duplicates()
ratings = ratings.drop_duplicates()
tagging = tagging.drop_duplicates()
genome_scores = genome_scores.drop_duplicates()
genome_tags = genome_tags.drop_duplicates()
ratings = ratings[ratings.timestamp > 1514764799] #2M interactions
# Sync
movies = movies[movies.iid.isin(ratings.iid.unique())]
ratings = ratings[ratings.iid.isin(movies.iid.unique())]
tagging = tagging[tagging.iid.isin(ratings.iid.unique())]
tagging = tagging[tagging.uid.isin(ratings.uid.unique())]
genome_scores = genome_scores[genome_scores.iid.isin(ratings.iid.unique())]
genome_scores = genome_scores[genome_scores.genome_tid.isin(genome_tags.genome_tid.unique())]
genome_tags = genome_tags[genome_tags.genome_tid.isin(genome_scores.genome_tid.unique())]
# Remove infrequent movies
movie_count = ratings['iid'].value_counts()
movie_count.name = 'movie_count'
ratings = ratings[ratings.join(movie_count, on='iid').movie_count > self.num_core]
# Remove infrequent users
user_count = ratings['uid'].value_counts()
user_count.name = 'user_count'
ratings = ratings.join(user_count, on='uid')
ratings = ratings[ratings.user_count > self.num_core]
ratings = ratings[ratings.user_count < 30 * self.num_core]
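                # Also drop hyper-active users (more than 30x the core threshold) so that a handful of
                # accounts do not dominate the interaction distribution.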
ratings = ratings.drop(columns=['user_count'])
# Sync
movies = movies[movies.iid.isin(ratings.iid.unique())]
tagging = tagging[tagging.iid.isin(ratings.iid.unique())]
tagging = tagging[tagging.uid.isin(ratings.uid.unique())]
genome_scores = genome_scores[genome_scores.iid.isin(ratings.iid.unique())]
genome_tags = genome_tags[genome_tags.genome_tid.isin(genome_scores.genome_tid.unique())]
# Remove infrequent tags
tag_count = tagging['tag'].value_counts()
tag_count.name = 'tag_count'
tagging = tagging[tagging.join(tag_count, on='tag').tag_count > self.num_feat_core]
# Remove infrequent genome tags
genome_tagging = genome_scores[genome_scores.relevance > 0.5]
genome_tag_count = genome_tagging['genome_tid'].value_counts()
genome_tag_count.name = 'genome_tag_count'
genome_tagging = genome_tagging[
genome_tagging.join(genome_tag_count, 'genome_tid').genome_tag_count > self.num_feat_core]
# Reindex the uid and iid in case of missing values
movies, ratings, tagging, tags, genome_tagging, genome_tags = reindex_df_ml25m(
movies, ratings, tagging, genome_tagging, genome_tags)
# Drop the infrequent writer, actor and directors
movies = drop_infrequent_concept_from_str(movies, 'writers', self.num_feat_core)
movies = drop_infrequent_concept_from_str(movies, 'directors', self.num_feat_core)
movies = drop_infrequent_concept_from_str(movies, 'actors', self.num_feat_core)
# filter the years
years = movies.year.to_numpy()
years[years < 1950] = 1950
movies['year'] = years
if self.type == 'hete':
years = movies.year.to_numpy().astype(np.int)
min_year = min(years)
max_year = max(years)
num_years = (max_year - min_year) // 10
discretized_years = [min_year + i * 10 for i in range(num_years + 1)]
for i in range(len(discretized_years) - 1):
years[(discretized_years[i] <= years) & (years < discretized_years[i + 1])] = str(
discretized_years[i])
years[years < discretized_years[0]] = discretized_years[0]
years[years >= discretized_years[-1]] = discretized_years[-1]
movies['year'] = years
# save dfs
print('Saving processed csv...')
save_df(tags, join(self.processed_dir, 'tags.csv'))
save_df(tagging, join(self.processed_dir, 'tagging.csv'))
save_df(genome_tagging, join(self.processed_dir, 'genome_tagging.csv'))
save_df(genome_tags, join(self.processed_dir, 'genome_tags.csv'))
save_df(movies, join(self.processed_dir, 'movies.csv'))
save_df(ratings, join(self.processed_dir, 'ratings.csv'))
# Generate and save graph
if self.type == 'hete':
dataset_property_dict = generate_ml25m_hete_graph(movies, ratings, tagging, genome_tagging)
else:
raise NotImplementedError
with open(self.processed_paths[0], 'wb') as f:
pickle.dump(dataset_property_dict, f)
elif self.name == 'latest-small':
try:
movies = pd.read_csv(join(self.processed_dir, 'movies.csv'), sep=';').fillna('')
ratings = pd.read_csv(join(self.processed_dir, 'ratings.csv'), sep=';')
tagging = pd.read_csv(join(self.processed_dir, 'tagging.csv'), sep=';')
print('Read data frame from {}!'.format(self.processed_dir))
            except Exception:
unzip_raw_dir = join(self.raw_dir, 'ml-{}'.format(self.name))
print('Data frame not found in {}! Read from raw data and preprocessing from {}!'.format(
self.processed_dir, unzip_raw_dir))
raw_movies_path = join(self.raw_dir, 'raw_movies.csv')
raw_ratings_path = join(self.raw_dir, 'raw_ratings.csv')
raw_tagging_path = join(self.raw_dir, 'raw_tagging.csv')
if not (isfile(raw_movies_path) and isfile(raw_ratings_path) and isfile(raw_tagging_path)):
print('Raw files not found! Reading directories and actors from api!')
movies, ratings, tagging = parse_mlsmall(unzip_raw_dir)
save_df(movies, raw_movies_path)
save_df(ratings, raw_ratings_path)
save_df(tagging, raw_tagging_path)
else:
print('Raw files loaded!')
movies = pd.read_csv(raw_movies_path, sep=';').fillna('')
ratings = | pd.read_csv(raw_ratings_path, sep=';') | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
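        # pivot cannot disambiguate the duplicated ('foo', 'one') pair above; pivot_table with an
        # aggfunc is the usual workaround when index/column combinations repeat.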
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
        key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = | pd.Series(['a', 'b', 'c', 'a'], dtype='category') | pandas.Series |
"""
Utils for time series generation
--------------------------------
"""
import math
from typing import Union
import numpy as np
import pandas as pd
import holidays
from ..timeseries import TimeSeries
from ..logging import raise_if_not, get_logger
logger = get_logger(__name__)
def constant_timeseries(value: float = 1,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a constant univariate TimeSeries with the given value, length, start date and frequency.
Parameters
----------
value
The constant value that the TimeSeries object will assume at every index.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected;
see `docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A constant TimeSeries with value 'value'.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.full(length, value)
return TimeSeries.from_times_and_values(times, values, freq=freq)
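# Illustrative usage (argument values are made up):
#   ts = constant_timeseries(value=5.0, length=30, freq='D', start_ts=pd.Timestamp('2020-01-01'))
# yields a 30-entry daily series that equals 5.0 everywhere.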
def linear_timeseries(start_value: float = 0,
end_value: float = 1,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a univariate TimeSeries with a starting value of `start_value` that increases linearly such that
it takes on the value `end_value` at the last entry of the TimeSeries. This means that
the difference between two adjacent entries will be equal to
(`end_value` - `start_value`) / (`length` - 1).
Parameters
----------
start_value
The value of the first entry in the TimeSeries.
end_value
The value of the last entry in the TimeSeries.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A linear TimeSeries created as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.linspace(start_value, end_value, length)
return TimeSeries.from_times_and_values(times, values, freq=freq)
def sine_timeseries(value_frequency: float = 0.1,
value_amplitude: float = 1.,
value_phase: float = 0.,
value_y_offset: float = 0.,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a univariate TimeSeries with a sinusoidal value progression with a given frequency, amplitude,
phase and y offset.
Parameters
----------
value_frequency
The number of periods that take place within one time unit given in `freq`.
value_amplitude
The maximum difference between any value of the returned TimeSeries and `y_offset`.
value_phase
The relative position within one period of the first value of the returned TimeSeries (in radians).
value_y_offset
The shift of the sine function along the y axis.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A sinusoidal TimeSeries parametrized as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.array(range(length), dtype=float)
f = np.vectorize(
lambda x: value_amplitude * math.sin(2 * math.pi * value_frequency * x + value_phase) + value_y_offset
)
values = f(values)
return TimeSeries.from_times_and_values(times, values, freq=freq)
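# Illustrative usage (argument values are made up):
#   ts = sine_timeseries(value_frequency=0.01, value_amplitude=2.0, length=100)
# value_frequency is in cycles per time step, so 0.01 cycles/day over 100 daily points spans
# roughly one full period.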
def gaussian_timeseries(length: int = 10,
freq: str = 'D',
mean: Union[float, np.ndarray] = 0.,
std: Union[float, np.ndarray] = 1.,
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a gaussian univariate TimeSeries by sampling all the series values independently,
from a gaussian distribution with mean `mean` and standard deviation `std`.
Parameters
----------
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
mean
The mean of the gaussian distribution that is sampled at each step.
If a float value is given, the same mean is used at every step.
If a numpy.ndarray of floats with the same length as `length` is
given, a different mean is used at each time step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
If a float value is given, the same standard deviation is used at every step.
If an array of dimension `(length, length)` is given, it will
be used as covariance matrix for a multivariate gaussian distribution.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A white noise TimeSeries created as indicated above.
"""
if (type(mean) == np.ndarray):
raise_if_not(mean.shape == (length,), 'If a vector of means is provided, '
'it requires the same length as the TimeSeries.', logger)
if (type(std) == np.ndarray):
raise_if_not(std.shape == (length, length), 'If a matrix of standard deviations is provided, '
'its shape has to match the length of the TimeSeries.', logger)
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.random.normal(mean, std, size=length)
return TimeSeries.from_times_and_values(times, values, freq=freq)
def random_walk_timeseries(length: int = 10,
freq: str = 'D',
mean: float = 0.,
std: float = 1.,
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a random walk univariate TimeSeries, where each step is obtained by sampling a gaussian distribution
with mean `mean` and standard deviation `std`.
Parameters
----------
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
mean
The mean of the gaussian distribution that is sampled at each step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A random walk TimeSeries created as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.cumsum(np.random.normal(mean, std, size=length))
return TimeSeries.from_times_and_values(times, values, freq=freq)
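# Illustrative usage (argument values are made up):
#   ts = random_walk_timeseries(length=250, mean=0.1, std=1.0)
# each increment is an independent N(mean, std**2) draw and the series is their cumulative sum.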
def holidays_timeseries(time_index,
country_code: str,
prov: str = None,
state: str = None) -> TimeSeries:
"""
Creates a binary univariate TimeSeries with index `time_index` that equals 1 at every index that lies within
(or equals) a selected country's holiday, and 0 otherwise.
Available countries can be found `here <https://github.com/dr-prodigy/python-holidays#available-countries>`_.
    Parameters
    ----------
    time_index
        The time index (e.g. a pd.DatetimeIndex) over which the holiday series is built.
    country_code
        The country ISO code
    prov
        The province
    state
        The state
Returns
-------
TimeSeries
A new binary holiday TimeSeries instance.
"""
scope = range(time_index[0].year, (time_index[-1] + pd.Timedelta(days=1)).year)
country_holidays = holidays.CountryHoliday(country_code, prov=prov, state=state, years=scope)
index_series = | pd.Series(time_index, index=time_index) | pandas.Series |
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Methods for processing VERIFICATION data.
"""
import os
import re
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pickle
import requests
from collections import OrderedDict
from mosx.MesoPy import Meso
from mosx.obs.methods import get_obs_hourly, reindex_hourly
from mosx.util import generate_dates, get_array, get_ghcn_stid
def get_cf6_files(config, num_files=1):
"""
After code by Luke Madaus
Retrieves CF6 climate verification data released by the NWS. Parameter num_files determines how many recent files
are downloaded.
"""
# Create directory if it does not exist
site_directory = config['SITE_ROOT']
# Construct the web url address. Check if a special 3-letter station ID is provided.
nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'
try:
stid3 = config['station_id3']
except KeyError:
stid3 = config['station_id'][1:].upper()
nws_url = nws_url % stid3
# Determine how many files (iterations of product) we want to fetch
if num_files == 1:
if config['verbose']:
print('get_cf6_files: retrieving latest CF6 file for %s' % config['station_id'])
else:
if config['verbose']:
print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, config['station_id']))
# Fetch files
for r in range(1, num_files + 1):
# Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files
version = 'version=%d&glossary=0' % r
nws_site = '&'.join((nws_url, version))
response = requests.get(nws_site)
cf6_data = response.text
# Remove the header
try:
body_and_footer = cf6_data.split('CXUS')[1] # Mainland US
except IndexError:
try:
body_and_footer = cf6_data.split('CXHW')[1] # Hawaii
except IndexError:
body_and_footer = cf6_data.split('CXAK')[1] # Alaska
body_and_footer_lines = body_and_footer.splitlines()
if len(body_and_footer_lines) <= 2:
body_and_footer = cf6_data.split('000')[2]
# Remove the footer
body = body_and_footer.split('[REMARKS]')[0]
# Find the month and year of the file
current_year = re.search('YEAR: *(\d{4})', body).groups()[0]
try:
current_month = re.search('MONTH: *(\D{3,9})', body).groups()[0]
current_month = current_month.strip() # Gets rid of newlines and whitespace
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%B %Y')
except: # Some files have a different formatting, although this may be fixed now.
current_month = re.search('MONTH: *(\d{2})', body).groups()[0]
current_month = current_month.strip()
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%m %Y')
# Write to a temporary file, check if output file exists, and if so, make sure the new one has more data
datestr = file_date.strftime('%Y%m')
filename = '%s/%s_%s.cli' % (site_directory, config['station_id'].upper(), datestr)
temp_file = '%s/temp.cli' % site_directory
with open(temp_file, 'w') as out:
out.write(body)
        def file_len(file_name):
            # Count the lines in a file; return 0 for an empty file instead of raising NameError.
            i = -1
            with open(file_name) as f:
                for i, l in enumerate(f):
                    pass
            return i + 1
if os.path.isfile(filename):
old_file_len = file_len(filename)
new_file_len = file_len(temp_file)
if old_file_len < new_file_len:
if config['verbose']:
print('get_cf6_files: overwriting %s' % filename)
os.remove(filename)
os.rename(temp_file, filename)
else:
if config['verbose']:
print('get_cf6_files: %s already exists' % filename)
else:
if config['verbose']:
print('get_cf6_files: writing %s' % filename)
os.rename(temp_file, filename)
def _cf6_wind(config):
"""
    After code by Luke Madaus
This function is used internally only.
Generates wind verification values from climate CF6 files stored in SITE_ROOT. These files can be generated
externally by get_cf6_files.py. This function is not necessary if climo data from _climo_wind is found, except for
recent values which may not be in the NCDC database yet.
:param config:
:return: dict: wind values from CF6 files
"""
if config['verbose']:
print('_cf6_wind: searching for CF6 files in %s' % config['SITE_ROOT'])
allfiles = os.listdir(config['SITE_ROOT'])
filelist = [f for f in allfiles if f.startswith(config['station_id'].upper()) and f.endswith('.cli')]
filelist.sort()
if len(filelist) == 0:
raise IOError('No CF6 files found.')
if config['verbose']:
print('_cf6_wind: found %d CF6 files.' % len(filelist))
# Interpret CF6 files
if config['verbose']:
print('_cf6_wind: reading CF6 files')
cf6_values = {}
for file in filelist:
        year, month = re.search(r'(\d{4})(\d{2})', file).groups()
infile = open('%s/%s' % (config['SITE_ROOT'], file), 'r')
for line in infile:
            matcher = re.compile(
                r'( \d|\d{2}) ( \d{2}|-\d{2}| \d| -\d|\d{3})')
if matcher.match(line):
# We've found an ob line!
lsp = line.split()
day = int(lsp[0])
curdt = datetime(int(year), int(month), day)
cf6_values[curdt] = {}
# Wind
if lsp[11] == 'M':
cf6_values[curdt]['wind'] = 0.0
else:
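                    # CF6 wind speeds are reported in mph; 0.868976 converts mph to knots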
cf6_values[curdt]['wind'] = float(lsp[11]) * 0.868976
return cf6_values
def _climo_wind(config, dates=None):
"""
Fetches climatological wind data using ulmo package to retrieve NCDC archives.
:param config:
:param dates: list of datetime objects
:return: dict: dictionary of wind values
"""
import ulmo
if config['verbose']:
print('_climo_wind: fetching data from NCDC (may take a while)...')
v = 'WSF2'
wind_dict = {}
D = ulmo.ncdc.ghcn_daily.get_data(get_ghcn_stid(config), as_dataframe=True, elements=[v])
if dates is None:
dates = list(D[v].index.to_timestamp().to_pydatetime())
for date in dates:
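        # GHCN-Daily stores WSF2 (fastest 2-minute wind) in tenths of m/s; convert to knots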
wind_dict[date] = {'wind': D[v].loc[date]['value'] / 10. * 1.94384}
return wind_dict
def pop_rain(series):
"""
Converts a series of rain values into 0 or 1 depending on whether there is measurable rain
:param series:
:return:
"""
new_series = series.copy()
new_series[series >= 0.01] = 1.
new_series[series < 0.01] = 0.
return new_series
def categorical_rain(series):
"""
Converts a series of rain values into categorical precipitation quantities a la MOS.
:param series:
:return:
"""
new_series = series.copy()
for j in range(len(series)):
if series.iloc[j] < 0.01:
new_series.iloc[j] = 0.
elif series.iloc[j] < 0.10:
new_series.iloc[j] = 1.
elif series.iloc[j] < 0.25:
new_series.iloc[j] = 2.
elif series.iloc[j] < 0.50:
new_series.iloc[j] = 3.
elif series.iloc[j] < 1.00:
new_series.iloc[j] = 4.
elif series.iloc[j] < 2.00:
new_series.iloc[j] = 5.
elif series.iloc[j] >= 2.00:
new_series.iloc[j] = 6.
else: # missing, or something else that's strange
new_series.iloc[j] = 0.
return new_series
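# A vectorized equivalent of categorical_rain, using pd.cut with the same bin
# edges. This is only an illustrative sketch (not part of the original module);
# the loop version above is what the rest of the code calls.
def categorical_rain_vectorized(series):
    """Categorical precipitation a la MOS, computed with pd.cut (sketch)."""
    bins = [-np.inf, 0.01, 0.10, 0.25, 0.50, 1.00, 2.00, np.inf]
    codes = pd.cut(series, bins=bins, labels=False, right=False)
    new_series = pd.Series(np.asarray(codes, dtype=float), index=series.index)
    # Missing values fall outside every bin; treat them as category 0, like the loop above
    return new_series.fillna(0.)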
def verification(config, output_file=None, use_cf6=True, use_climo=True, force_rain_quantity=False):
"""
Generates verification data from MesoWest and saves to a file, which is used to train the model and check test
results.
:param config:
:param output_file: str: path to output file
:param use_cf6: bool: if True, uses wind values from CF6 files
:param use_climo: bool: if True, uses wind values from NCDC climatology
:param force_rain_quantity: if True, returns the actual quantity of rain (rather than POP); useful for validation
files
:return:
"""
if output_file is None:
output_file = '%s/%s_verif.pkl' % (config['SITE_ROOT'], config['station_id'])
dates = generate_dates(config)
api_dates = generate_dates(config, api=True, api_add_hour=config['forecast_hour_start'] + 24)
# Read new data for daily values
m = Meso(token=config['meso_token'])
if config['verbose']:
print('verification: MesoPy initialized for station %s' % config['station_id'])
print('verification: retrieving latest obs and metadata')
latest = m.latest(stid=config['station_id'])
obs_list = list(latest['STATION'][0]['SENSOR_VARIABLES'].keys())
# Look for desired variables
vars_request = ['air_temp', 'wind_speed', 'precip_accum_one_hour']
vars_option = ['air_temp_low_6_hour', 'air_temp_high_6_hour', 'precip_accum_six_hour']
# Add variables to the api request if they exist
if config['verbose']:
print('verification: searching for 6-hourly variables...')
for var in vars_option:
if var in obs_list:
if config['verbose']:
print('verification: found variable %s, adding to data' % var)
vars_request += [var]
    vars_api = ','.join(vars_request)
# Units
units = 'temp|f,precip|in,speed|kts'
# Retrieve data
obspd = pd.DataFrame()
for api_date in api_dates:
if config['verbose']:
print('verification: retrieving data from %s to %s' % api_date)
obs = m.timeseries(stid=config['station_id'], start=api_date[0], end=api_date[1], vars=vars_api, units=units)
obspd = pd.concat((obspd, pd.DataFrame.from_dict(obs['STATION'][0]['OBSERVATIONS'])), ignore_index=True)
# Rename columns to requested vars
obs_var_names = obs['STATION'][0]['SENSOR_VARIABLES']
obs_var_keys = list(obs_var_names.keys())
col_names = list(map(''.join, obspd.columns.values))
for c in range(len(col_names)):
col = col_names[c]
for k in range(len(obs_var_keys)):
key = obs_var_keys[k]
if col == list(obs_var_names[key].keys())[0]:
col_names[c] = key
obspd.columns = col_names
# Make sure we have columns for all requested variables
for var in vars_request:
if var not in col_names:
obspd = obspd.assign(**{var: np.nan})
# Change datetime column to datetime object, subtract 6 hours to use 6Z days
if config['verbose']:
print('verification: setting time back %d hours for daily statistics' % config['forecast_hour_start'])
dateobj = pd.to_datetime(obspd['date_time']) - timedelta(hours=config['forecast_hour_start'])
obspd['date_time'] = dateobj
datename = 'date_time_minus_%d' % config['forecast_hour_start']
obspd = obspd.rename(columns={'date_time': datename})
# Reformat data into hourly and daily
# Hourly
def hour(dates):
date = dates.iloc[0]
return datetime(date.year, date.month, date.day, date.hour)
def last(values):
return values.iloc[-1]
aggregate = {datename: hour}
if 'air_temp_high_6_hour' in vars_request and 'air_temp_low_6_hour' in vars_request:
aggregate['air_temp_high_6_hour'] = np.max
aggregate['air_temp_low_6_hour'] = np.min
aggregate['air_temp'] = {'air_temp_max': np.max, 'air_temp_min': np.min}
if 'precip_accum_six_hour' in vars_request:
aggregate['precip_accum_six_hour'] = np.max
aggregate['wind_speed'] = np.max
aggregate['precip_accum_one_hour'] = np.max
if config['verbose']:
print('verification: grouping data by hour for hourly observations')
# Note that obs in hour H are reported at hour H, not H+1
obs_hourly = obspd.groupby([pd.DatetimeIndex(obspd[datename]).year,
pd.DatetimeIndex(obspd[datename]).month,
pd.DatetimeIndex(obspd[datename]).day,
pd.DatetimeIndex(obspd[datename]).hour]).agg(aggregate)
# Rename columns
col_names = obs_hourly.columns.values
col_names_new = []
for c in range(len(col_names)):
if col_names[c][0] == 'air_temp':
col_names_new.append(col_names[c][1])
else:
col_names_new.append(col_names[c][0])
obs_hourly.columns = col_names_new
# Daily
def day(dates):
date = dates.iloc[0]
return datetime(date.year, date.month, date.day)
aggregate[datename] = day
aggregate['air_temp_min'] = np.min
aggregate['air_temp_max'] = np.max
aggregate['precip_accum_six_hour'] = np.sum
try:
aggregate.pop('air_temp')
except:
pass
if config['verbose']:
print('verification: grouping data by day for daily verifications')
obs_daily = obs_hourly.groupby([pd.DatetimeIndex(obs_hourly[datename]).year,
                                    pd.DatetimeIndex(obs_hourly[datename]).month,
                                    pd.DatetimeIndex(obs_hourly[datename]).day]).agg(aggregate)
    # (daily grouping keys assumed to mirror the hourly aggregation above)
import os
import pickle
from pathlib import Path
from time import perf_counter
import numba as nb
import numpy as np
import pandas as pd
from tqdm import tqdm
from matplotlib import pyplot as pl
from lenskit import topn
from lenskit import util
from lenskit import batch
from lenskit import crossfold as xf
from lenskit.algorithms import als
from lenskit.metrics.predict import rmse
from lenskit.algorithms import Recommender
from lenskit.algorithms.basic import TopN, Memorized
METRICS = {
'ndcg': topn.ndcg,
'recall': topn.recall,
'precision': topn.precision,
'recip_rank': topn.recip_rank,
}
def split_dataset(ratings, user_fraction=.1):
"""Split a dataset in train/test data"""
n_users = len(ratings['user'].unique())
# There are many ways to separate a dataset in (train, test) data, here are two:
# - Row separation: the test set will contain users that the model knows.
# The performance of the model will be its ability to predict "new"
# tastes for a known user
# - User separation: the test set will contain users that the model has
    # never encountered. The performance of the model will be its ability to
# predict new users behaviours considering the behaviour of other
# known users.
# see [lkpy documentation](https://lkpy.readthedocs.io/en/stable/crossfold.html)
# Here the sampling is as follow:
# - Sample test_fraction * n_total users
# - Randomly select half of their listenings for the test set
result = list(xf.sample_users(
ratings[['user', 'item', 'rating']],
partitions=1,
size=int(n_users * user_fraction),
method=xf.SampleFrac(.5)
))[0]
print(f'n test users: {len(result.test["user"].unique())}')
return result.train, result.test
def train_model(
train,
n_factors=30,
n_iterations=20,
regularization=.1,
save_training_loss=False,
confidence_factor=40):
"""Train (and evaluate iterations if requested) model"""
# Encapsulate the model into a TopN recommender
model = Recommender.adapt(als.ImplicitMF(
n_factors,
iterations=n_iterations,
weight=confidence_factor,
progress=tqdm,
method='cg'
))
# Compute the confidence values for user-item pairs
train['rating'] = 1 + confidence_factor * train['rating']
if save_training_loss:
loss = np.zeros(n_iterations)
for i, intermediate_model in enumerate(model.fit_iters(train)):
predictions = generate_predictions(intermediate_model, train)
loss[i] = evaluate_model_loss(intermediate_model, predictions)
else:
model.fit(train)
loss = None
return model, loss
def generate_predictions(model, user_item):
"""Generate the rating predictions for each user->item pair
:returns: pd.DataFrame. A dataframe with at least the columns 'user',
'item', 'prediction' (the predicted scores)
"""
return batch.predict(model, user_item)
def generate_recommendations(model, test_ratings, n_recommendations=50):
"""Generate recommendations for a given model
"""
users = test_ratings.user.unique()
return batch.recommend(model, users, n_recommendations)
def evaluate_model_recommendations(recommendations, test, metrics) -> pd.DataFrame:
"""Evaluates a model via its recommendations
:param recommendations: pd.DataFrame with at least the following columns :
'user', 'item', 'score', 'rank'
:param test: pd.DataFrame. The testing data
:param metrics: list. A list of metrics' names (see recodiv.model.METRICS)
"""
analysis = topn.RecListAnalysis(n_jobs=20)
users = test.user.unique()
rec_users = recommendations['user'].unique()
for metric_name in metrics:
analysis.add_metric(METRICS[metric_name])
return analysis.compute(recommendations, test)
def evaluate_model_loss(model, predictions):
# do not consider the user-item pairs where no prediction could be generated
# (ie the items not in train set)
predictions = predictions[predictions['prediction'].notna()]
confidence = 1 + model.predictor.weight * predictions['rating'].to_numpy()
prediction = predictions['prediction'].to_numpy()
reg = model.predictor.reg * (
np.linalg.norm(model.predictor.user_features_, 'fro') \
+ np.linalg.norm(model.predictor.item_features_, 'fro')
)
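    # Implicit-feedback ALS objective as implemented here: confidence-weighted
    # squared error against the implicit preference of 1, plus Frobenius-norm
    # regularization of the user and item factor matrices.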
return confidence @ (1 - prediction)**2 + reg
def rank_to_weight(user_item, recommendations):
"""Compute the weight associated to each recommendation for each user in
recommendations
:param user_item: pd.DataFrame(columns=['user', 'item', 'rating']). All the
known user-item listenings counts
:param recommendations: pd.DataFrame(columns=['user', 'item', 'rank']). All
the recommendations made to each user in recommendations.
:returns: the recommendations DataFrame with the column ['weight']
"""
n_r = recommendations['rank'].max() # n_recommendations
users = recommendations.user.unique()
n_users = users.shape[0]
# get the volume of the users in the recommendations DataFrame
user_volume = user_item.groupby('user')['rating'].sum()
def user_weights(x):
# x.name is the id of the user
return (2 * user_volume[x.name] / (n_r * (n_r - 1))) * (n_r - x)
recommendations['weight'] = recommendations.groupby('user')['rank'].transform(user_weights)
return recommendations
@nb.njit
def wasserstein_1d(distribution: np.ndarray, other: np.ndarray) -> float:
"""Compute the Optimal Transport Distance between histograms
    We assume that the distributions are sorted by increasing index and have the
    same total weight.
"""
work = w_sum = u_sum = r = 0
i = j = 0
while i < distribution.shape[0] and j < other.shape[0]:
if i <= j:
work += np.abs(w_sum - u_sum) * (i - r)
w_sum += distribution[i]
r = i
i += 1
else:
work += np.abs(w_sum - u_sum) * (j - r)
u_sum += other[j]
r = j
j += 1
return work / u_sum
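# Example (illustrative only; both histograms share the same support and total
# mass, as the docstring requires):
#   wasserstein_1d(np.array([1., 0., 1.]), np.array([0., 2., 0.]))  # -> 1.0
# i.e. each unit of mass moves by one bin, for an average transport cost of 1.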
def tags_distance(distribution: pd.Series, other: pd.Series, tags: pd.Index, p=1):
"""Compute the Optimal Transport Distance between histograms (see
https://arxiv.org/pdf/1803.00567.pdf p.30-33)
"""
if p < 1:
        raise ValueError('p must be greater than or equal to 1')
if p != 1:
raise NotImplementedError('Only wasserstein 1 is currently supported')
# Make the tag distributions have the same support
distrib = distribution.reindex(index=tags, fill_value=0)
other = other.reindex(index=tags, fill_value=0)
# Sort by tag (in the lexicographic order) and normalize the distributions
    # This is important because the distance implicitly associates each tag with
    # a point in N.
distrib = distrib.sort_index()
distrib = distrib / distrib.sum()
other = other.sort_index()
other = other / other.sum()
# print(distrib, other, sep='\n')
return wasserstein_1d(distrib.to_numpy(), other.to_numpy())
if __name__ == '__main__':
tags = pd.Index(['rock', 'pop', 'jazz', 'metal', 'classical'])
    distribution = pd.Series({'rock': 3, 'pop': 10, 'jazz': 1})
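    # Illustrative continuation (assumed; the original snippet stops above):
    # compare against a second tag histogram defined on the same tag index.
    other = pd.Series({'rock': 1, 'classical': 4})
    print(tags_distance(distribution, other, tags))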
from path_manager import get_models_path
from gensim.models.wrappers.ldamallet import malletmodel2ldamodel
import os
import json
import numpy as np
import pandas as pd
from gensim.corpora import Dictionary
from gensim.models.wrappers import LdaMallet
from sklearn.metrics import euclidean_distances
class LDAInferencer:
def __init__(
self, corpus_id, model_id
):
# We will use the 0 index convention
CORPUS_ID = corpus_id
MODEL_ID = model_id
self.corpus_id = corpus_id
self.model_id = model_id
self.corpus_part = '_'.join(model_id.split('_')[:-1])
self.num_topics = int(model_id.split('_')[-1])
self.models_path = get_models_path('LDA')
self.model_folder = os.path.join(
self.models_path, f'{CORPUS_ID}-{MODEL_ID}')
self.model_data_folder = os.path.join(self.model_folder, 'data')
self.model = LdaMallet.load(os.path.join(
self.model_data_folder, f'{CORPUS_ID}_lda_model_{MODEL_ID}.mallet.lda'))
self.gensim_model = malletmodel2ldamodel(
self.model, iterations=self.model.iterations)
self.g_dict = Dictionary()
self.g_dict.id2token = self.model.id2word
self.g_dict.token2id = {k: v for v, k in self.g_dict.id2token.items()}
self.normalized_topics = self.model.get_topics()
self.topics = self.model.word_topics
self.documents_topics = pd.read_csv(
os.path.join(self.model_data_folder,
f'doc_topics_{MODEL_ID}_with_details.csv'),
# header='', # Change to True if topic id should be present as the header
index_col=0 # Change to True if the uid should be present as the index
)
self.documents_topics.columns = self.documents_topics.columns.astype(
int)
self.normalized_topics_by_documents = self.documents_topics / \
self.documents_topics.sum()
self.normalized_documents_topics = self.documents_topics.div(
self.documents_topics.sum(axis=1), axis=0)
self.topic_composition_ranges = self._get_topic_composition_ranges()
def get_topic_share(self, topic_id, doc_ids, serialize=False):
if isinstance(doc_ids, str):
doc_ids = [doc_ids]
topic_share = self.normalized_topics_by_documents.reindex(doc_ids)[
topic_id].to_dict()
if serialize:
topic_share = json.dumps(topic_share)
return topic_share
def infer_topics(self, text, topn_topics=None, total_topic_score=None, serialize=False):
if isinstance(text, str):
text = text.split()
if len(text) == 1:
doc_topics = self.gensim_model.get_term_topics(
self.g_dict.token2id[text[0]])
else:
doc = self.g_dict.doc2bow(text)
doc_topics = self.gensim_model[doc]
found_topics = {i for i, v in doc_topics}
print(found_topics)
for i in range(self.model.num_topics):
if i not in found_topics:
doc_topics.append((i, 0))
doc_topics = pd.DataFrame(doc_topics, columns=['topic', 'score'])
doc_topics = doc_topics.sort_values('score', ascending=False)
if total_topic_score is not None:
tdoc_topics = doc_topics[doc_topics.score.cumsum(
) <= total_topic_score]
if tdoc_topics.empty:
doc_topics = doc_topics.head(1)
else:
doc_topics = tdoc_topics
if topn_topics is not None and doc_topics.shape[0] > topn_topics:
doc_topics = doc_topics.head(topn_topics)
# doc_topics['topic'] = doc_topics['topic'].astype(int)
doc_topics = doc_topics.to_dict('records')
if serialize:
doc_topics = json.dumps(doc_topics)
return doc_topics
def get_model_topic_words(self, topn_words=5, total_word_score=None, serialize=False):
payload = []
for topic_id in range(self.num_topics):
topic_words = self.get_topic_words(
topic_id,
topn_words=topn_words,
total_word_score=total_word_score
)
payload.append({'topic_id': topic_id, 'topic_words': topic_words})
if serialize:
payload = json.dumps(payload)
return payload
def get_topic_words(self, topic_id, topn_words=10, total_word_score=None, serialize=False):
topic_id = int(topic_id)
topic_words = pd.DataFrame(self.model.show_topic(
topic_id, topn=topn_words), columns=['word', 'score'])
topic_words = topic_words.sort_values('score', ascending=False)
if total_word_score is not None:
ttopic_words = topic_words[topic_words.score.cumsum(
) <= total_word_score]
if ttopic_words.empty:
topic_words = topic_words.head(1)
else:
topic_words = ttopic_words
if topn_words is not None and topic_words.shape[0] > topn_words:
topic_words = topic_words.head(topn_words)
topic_words = topic_words.to_dict('records')
if serialize:
topic_words = json.dumps(topic_words)
return topic_words
def get_doc_topic_words(self, text, topn_topics=10, topn_words=10, total_topic_score=1, total_word_score=1, serialize=False):
doc_topics = self.infer_topics(
text, topn_topics=topn_topics, total_topic_score=total_topic_score)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
topic_data = {'topic': topic, 'score': topic_score}
topic_data['words'] = topic_words
doc_topic_words.append(topic_data)
doc_topic_words = pd.DataFrame(doc_topic_words).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_doc_topic_words_by_id(self, doc_id, topn_topics=10, topn_words=10, total_topic_score=1, total_word_score=1, serialize=False):
doc_topics = self.get_doc_topic_by_id(
doc_id, topn=topn_topics, serialize=False)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
topic_data = {'topic': topic, 'score': topic_score}
topic_data['words'] = topic_words
doc_topic_words.append(topic_data)
doc_topic_words = pd.DataFrame(doc_topic_words).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_combined_doc_topic_words(self, text, topn_topics=None, topn_words=None, total_topic_score=0.8, total_word_score=0.2, serialize=False):
doc_topics = self.infer_topics(
text, topn_topics=topn_topics, total_topic_score=total_topic_score)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
for tw in topic_words:
word = tw['word']
word_score = tw['score']
doc_topic_words.append({
'topic': topic,
'word': word,
'topic_score': topic_score,
'word_score': word_score,
'score': topic_score * word_score
})
doc_topic_words = pd.DataFrame(doc_topic_words).sort_values(
'score', ascending=False).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_doc_topic_by_id(self, doc_id, topn=None, serialize=False):
doc = self.documents_topics.loc[doc_id]
if doc.empty:
return []
# Just call is score for consistency
doc.name = 'score'
doc.index.name = 'topic'
doc = doc.sort_values(ascending=False) / doc.sum()
doc.index = doc.index.astype(int)
doc = doc.reset_index()
if topn is not None:
doc = doc.head(topn)
doc = doc.to_dict('records')
if serialize:
doc = json.dumps(doc)
return doc
def get_similar_documents(self, document, topn=10, return_data='id', return_similarity=False, duplicate_threshold=0.01, show_duplicates=True, serialize=False):
doc_topics = self.infer_topics(document)
        doc_topics = pd.DataFrame(doc_topics)
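        # NOTE: the lines below are only a sketch of one plausible way to finish
        # this method (rank stored documents by Euclidean distance in topic space,
        # using the euclidean_distances import above). They are an assumption, not
        # the original implementation, and they ignore the return_data,
        # return_similarity, duplicate_threshold and show_duplicates options.
        doc_vec = doc_topics.set_index('topic')['score'].reindex(
            range(self.num_topics), fill_value=0).to_numpy().reshape(1, -1)
        distances = euclidean_distances(
            self.normalized_documents_topics.to_numpy(), doc_vec).flatten()
        order = np.argsort(distances)[:topn]
        similar = self.normalized_documents_topics.index[order].tolist()
        if serialize:
            similar = json.dumps(similar)
        return similar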
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
# load new EDGAR v5.0 data ---
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
edgar_files = ['CH4', 'CO2_excl_short-cycle_org_C', 'CO2_org_short-cycle_C', 'N2O']
ef = edgar_files[0]
edgar_df = None
for ef in edgar_files:
logger(ef)
ey = 2018 if ef == 'CO2_excl_short-cycle_org_C' else 2015
frame = pd.read_excel(f'{root}\\edgar_v5.0\\v50_{ef}_1970_{ey}.xls', sheet_name='TOTALS BY COUNTRY',
header=9)
frame = frame[['ISO_A3'] + list(range(1970, ey + 1))].rename(columns={'ISO_A3': 'code'}).set_index('code')
frame.columns = frame.columns.rename('year')
frame = frame.unstack().rename(f'edgar50_{ef}').reset_index()
frame = frame[~frame['code'].isin(['SEA', 'AIR'])]
if edgar_df is None:
edgar_df = frame
else:
edgar_df = pd.merge(edgar_df, frame, how='outer')
edgar_df.to_csv(root + '\\edgar_v5.0.csv', index=False)
edgar_df.show()
data = edgar_df.copy()
# find sensible GDP vs population vs CO2eq (or CO2) data vs time ?
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
df = pd.read_csv(root + '\\data_all.csv')
df.show_csv()
df.query('code == "CZE"').show_csv()
df = pd.merge(df, edgar_df, how='left', on=['code', 'year'])
df.to_csv(f'{root}\\data_all_w_edgar50.csv', index=False)
df = pd.read_csv(f'{root}\\data_all_w_edgar50.csv')
df['edgar432_co2'] = df['edgar432_CO2_excl_short-cycle_org_C']
df['edgar432_co2_w_short'] = df['edgar432_co2'] + df['edgar432_CO2_org_short-cycle_C']
# actually, these are old sensitivities!
df['edgar432_co2eq'] = df['edgar432_CO2_excl_short-cycle_org_C'] + 25 * df['edgar432_CH4'] + 298 * df['edgar432_N2O']
df['edgar432_co2eq_w_short'] = df['edgar432_co2eq'] + df['edgar432_CO2_org_short-cycle_C']
df['edgar50_co2'] = df['edgar50_CO2_excl_short-cycle_org_C']
df['edgar50_co2_w_short'] = df['edgar50_co2'] + df['edgar50_CO2_org_short-cycle_C']
# I am using the new sensitivities here, 28 and 265
df['edgar50_co2eq'] = df['edgar50_CO2_excl_short-cycle_org_C'] + 28 * df['edgar50_CH4'] + 265 * df['edgar50_N2O']
df['edgar50_co2eq_w_short'] = df['edgar50_co2eq'] + df['edgar50_CO2_org_short-cycle_C']
data = df[['code', 'year', 'SP.POP.TOTL', 'NY.GDP.MKTP.PP.KD', 'edgar50_co2eq']] \
.rename(columns={'year': 'year_data', 'SP.POP.TOTL': 'pop', 'NY.GDP.MKTP.PP.KD': 'gdp_ppp',
'edgar50_co2eq': 'co2eq'})
data
sns.lineplot(x='year_data', y='co2eq', data=data, units='code', estimator=None).show()
sns.lineplot(x='year_data', y='pop', data=data, units='code', estimator=None).show()
sns.lineplot(x='year_data', y='gdp_ppp', data=data, units='code', estimator=None).show()
vars = ['pop', 'gdp_ppp', 'co2eq']
data = data.dropna(subset=vars)
codes = pd.DataFrame({'code': np.sort(data['code'].unique())})
codes['year'] = np.int_(2012)
data['year_data'] = np.int_(data['year_data'])
res = pd.merge_asof(codes, data.sort_values('year_data'), by='code', left_on='year', right_on='year_data')
res = pd.merge(res, countries[['code', 'en_short', 'en_region', 'cz_short', 'cz_region', 'en_category', 'cz_category',
'cz_cat_desc']])
df.dtypes
data
countries = pd.read_csv('D:\\projects\\fakta-o-klimatu\\work\\emission-intensity\\countries.csv')
countries.show()
data.show()
no_years = data.groupby('code')['year_data'].count().rename('count').reset_index()
max_pop = data.groupby('code')['pop'].max().reset_index()
pop_years = pd.merge(no_years, max_pop)
pop_years['pop'].sum() # 7_248_361_589
pop_years[pop_years['count'] < 26]['pop'].sum() # 139_046_348
pop_years[pop_years['count'] == 26]['pop'].sum() # 7_109_315_241
pop_years[pop_years['count'] == 23]['pop']
countries.dtypes
countries = pd.merge(countries, pop_years)
import pandas as pd
import os
def get_options_data() -> pd.DataFrame:
"""Get FX option contracts (1m maturity) vol, in frac of 1 p.a.
Output contains
'base', 'counter' - str - 3-letter ISO
'date' - pd.Timestamp
'd10', 'd25' ... 'd90' - float - vol at that delta, in frac of 1 p.a.
"""
data = pd.read_feather(
os.path.join(os.environ.get("RESEARCH_DATA_PATH"),
"fx",
"fx-iv-by-delta-1m-blb-d.ftr")
)
data.loc[:, "vol"] /= 100
data.loc[:, "delta"] = data["delta"].map("d{}".format)
data.loc[:, "date"] = | pd.to_datetime(data.loc[:, "date"]) | pandas.to_datetime |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
)
arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
arr = SparseArray(arr_data)
class TestGetitem:
def test_getitem(self):
dense = arr.to_dense()
for i in range(len(arr)):
tm.assert_almost_equal(arr[i], dense[i])
tm.assert_almost_equal(arr[-i], dense[-i])
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"slc",
[
np.s_[:],
np.s_[1:10],
np.s_[1:100],
np.s_[10:1],
np.s_[:-3],
np.s_[-5:-4],
np.s_[:-12],
np.s_[-12:],
np.s_[2:],
np.s_[2::3],
np.s_[::2],
np.s_[::-1],
np.s_[::-2],
np.s_[1:6:2],
np.s_[:-6:-2],
],
)
@pytest.mark.parametrize(
"as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
)
def test_getslice(self, slc, as_dense):
as_dense = np.array(as_dense)
arr = SparseArray(as_dense)
result = arr[slc]
expected = SparseArray(as_dense[slc])
tm.assert_sp_array_equal(result, expected)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:], fill_value=0)
tm.assert_sp_array_equal(res, exp)
msg = "too many indices for array"
with pytest.raises(IndexError, match=msg):
sparse[4:, :]
with pytest.raises(IndexError, match=msg):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
        arr = SparseArray([0, 1, 2])
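        # Expected behaviour (assumption, mirroring the upstream pandas test):
        # an all-False boolean mask yields an empty result with the same dtype.
        res = arr[[False, False, False]]
        assert res.dtype == arr.dtype
        assert len(res) == 0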
"""Interactions with rainfall and river data."""
import numpy as np
import pandas as pd
__all__ = ["get_station_data"]
def get_station_data(filename, station_reference):
"""Return readings for a specified recording station from .csv file.
Parameters
----------
filename: str
filename to read
    station_reference
        station reference of the station whose readings should be returned.
    >>> data = get_station_data('resources/wet_day.csv', station_reference)
"""
frame = pd.read_csv(filename)
frame = frame.loc[frame.stationReference == station_reference]
    return pd.to_numeric(frame.value.values)
#!/usr/bin/env python
# coding: utf-8
import sys
import os
from datetime import datetime, timedelta
import urllib
import matplotlib as mpl
# mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from scipy.integrate import odeint
import scipy.signal
import pandas as pd
import seaborn as sns
sns.set_context('paper', font_scale=1.3)
red, blue, green = sns.color_palette('Set1', 3)
colors = {'red':red, 'blue':blue, 'green':green}
from click_spinner import spinner
from inference import get_last_NPI_date
from inference import get_first_NPI_date
from inference import params_bounds
from inference import get_model_class
from inference import find_start_day
from model.normal_prior_model import NormalPriorModel
from model.fixed_tau_model import FixedTauModel
from sklearn.metrics import mean_squared_error
def int_to_dt(t):
return pd.to_datetime(start_date) + timedelta(days=t)
def date_to_int(x):
dt = datetime.strptime(x + ' 2020', '%b %d %Y')
td = dt - start_date
return td.days
def date_to_date(x):
dt = datetime.strptime(x + ' 2020', '%b %d %Y')
return dt
def τ_to_string(τ, start_date):
return (pd.to_datetime(start_date) + timedelta(days=τ)).strftime('%b %d')
def load_chain(job_id=None, fname=None, delete_chain_less_than=None, nburn=2_000_000):
with spinner():
if fname is None:
fname = os.path.join(output_folder, job_id, 'inference', '{}.npz'.format(country))
inference_data = np.load(fname)
chain = inference_data['chain']
var_names = list(inference_data['var_names'])
nsteps, ndim, N, Td1, Td2, model_type = inference_data['params']
X = inference_data['incidences']
start_date = inference_data['start_date']
logliks = inference_data['logliks']
# print("Loaded {} with parameters:".format(fname))
# print(var_names)
nchains, nsteps, ndim = chain.shape
if delete_chain_less_than:
if len((chain[:,1_000_000, var_names.index('τ')]<delete_chain_less_than).nonzero())>1:
raise AssertionError('too many bad chains')
bad_chain_ind = (chain[:,1_000_000, var_names.index('τ')]<delete_chain_less_than).nonzero()[0][0]
chain = np.delete(chain, bad_chain_ind, axis=0)
chain = chain[:, nburn:, :]
chain = chain.reshape((-1, ndim))
logliks = logliks.reshape((nchains, nsteps))
if delete_chain_less_than:
logliks = np.delete(logliks, bad_chain_ind, axis=0)
logliks = logliks[:, nburn:].ravel()
return chain, logliks, Td1, Td2, model_type, X, start_date, N
def posterior_prediction(chain, model, nreps):
θ = chain[np.random.choice(chain.shape[0], nreps)]
return np.array([
model.generate_daily_cases(θi) for θi in θ
])
def load_data(country_name, up_to_date=None):
if country_name=='Wuhan':
df = pd.read_csv('../data/Incidence.csv')
df['date'] = pd.to_datetime(df['Date'], dayfirst=True)
df['cases'] = df[country_name]
df = df[::-1] # TODO why?
N = pd.read_csv('../data/pop.csv', index_col='City').loc[country_name].values[0]
else:
url = 'https://github.com/ImperialCollegeLondon/covid19model/raw/master/data/COVID-19-up-to-date.csv'
fname = '../data/COVID-19-up-to-date_master.csv'
if not os.path.exists(fname):
urllib.request.urlretrieve(url, fname)
df = pd.read_csv(fname, encoding='iso-8859-1')
df['date'] = pd.to_datetime(df['dateRep'], format='%d/%m/%Y')
df = df[df['countriesAndTerritories'] == country_name]
N = df.iloc[0]['popData2018']
cases_and_dates = df.iloc[::-1][['cases','date']]
if up_to_date:
cases_and_dates = cases_and_dates[cases_and_dates['date']<=up_to_date]
start_date = find_start_day(cases_and_dates)
X = cases_and_dates.loc[cases_and_dates['date'] >= start_date, 'cases'].values
T = cases_and_dates.loc[cases_and_dates['date'] >= start_date, 'date']
return X, T
if __name__ == '__main__':
nreps = 1000
date_threshold = datetime(2020, 4, 11)
last_date = datetime(2020, 4, 11) + timedelta(15)
output_folder = r'../../output-tmp'
job_id = sys.argv[1]
country = sys.argv[2]
    if len(sys.argv) > 3:
color = sys.argv[3]
if color in colors:
color = colors[color]
else:
color = blue
X, T = load_data(country, up_to_date=last_date)
idx = date_threshold < T
ndays = len(X)
chain_fname = os.path.join(output_folder, job_id, 'inference', '{}.npz'.format(country))
delete_chain_less_than = None
if job_id=='7M' and country=='Spain': #TODO make input parameter
delete_chain_less_than = 15
chain, _, Td1, Td2, model_type, _, start_date, N = load_chain(fname=chain_fname,delete_chain_less_than=delete_chain_less_than)
X_mean = scipy.signal.savgol_filter(X, 3, 1)
model_class = get_model_class(model_type)
model = model_class(country, X, pd.to_datetime(start_date), N, get_last_NPI_date(country), get_first_NPI_date(country), params_bounds, Td1, Td2)
X_pred = posterior_prediction(chain, model, nreps)
pvalue = (X_pred[:,idx].max(axis=1) > X[idx].max()).mean() # P(max(X_pred) > max(X))
pvalue_file = os.path.join(output_folder, job_id, 'figures', 'ppc_pvalue.txt'.format(country))
with open(pvalue_file, 'at') as f:
print("{}\t{:.4g}".format(country, pvalue), file=f)
#RMSE
unseen_idxs_14 = T > date_threshold
unseen_idxs_7 = (T > date_threshold) & (T < date_threshold+timedelta(8))
rmse7 = np.sqrt([mean_squared_error(X[unseen_idxs_7],pred) for pred in X_pred[:,unseen_idxs_7]]).mean()
rmse14 = np.sqrt([mean_squared_error(X[unseen_idxs_14],pred) for pred in X_pred[:,unseen_idxs_14]]).mean()
rmse_file = os.path.join(output_folder, job_id, 'figures', 'ppc_rmse.csv'.format(country))
with open(rmse_file, 'at') as f:
print("{}\t{:.4g}\t{:.4g}".format(country, rmse7, rmse14), file=f)
fig, ax = plt.subplots(1, 1, figsize=(6, 4), sharex=True, sharey=True)
ymax = min(X.max()*2, max(X.max(), X_pred.max()))
t = np.arange(0, ndays)
ax.plot(t[~idx], X[~idx], 'o', color='k', alpha=0.5)
ax.plot(t[~idx], X_mean[~idx], '-', color='k')
ax.plot(t[idx], X[idx], '*', color='k', alpha=0.5)
ax.plot(t[idx], X_mean[idx], '--', color='k')
ax.plot(X_pred.T, color=color, alpha=0.01)
ax.axvline((date_threshold-pd.to_datetime(start_date)).days, color='k', ls='--', lw=2)
labels = [τ_to_string(int(d), start_date) for d in t[::5]]
ax.set_xticks(t[::5])
ax.set_xticklabels(labels, rotation=45)
ax.set(ylabel='Daily cases', ylim=(-10, ymax))
    NPI_dates = pd.read_csv('../data/NPI_dates.csv')
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
assert_frame_equal(wts, wts_exp)
def test_during_roll_two_generics_one_day_static_transition(dates):
contract_dates = dates
ts = dates.iloc[0] + BDay(-1)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 0.5, ts), (0, 'CLZ16', 0.5, ts),
(1, 'CLZ16', 0.5, ts), (1, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_invalid_contract_dates():
ts = [pd.Timestamp("2016-10-19")]
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
non_unique_index = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-11-21')],
index=['instr1', 'instr1'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_index, mappings.static_transition,
transition=trans)
non_unique_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-20')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_vals, mappings.static_transition,
transition=trans)
non_monotonic_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-19')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.static_transition(ts[0], non_monotonic_vals, transition=trans)
not_enough_vals = pd.Series([pd.Timestamp('2016-10-19')],
index=['instr1'])
with pytest.raises(IndexError):
mappings.static_transition(ts[0], not_enough_vals, transition=trans)
def test_during_roll_two_generics_one_day_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-1)])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_product([timestamps,
['CLF17', 'CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
    cols = pd.Index([0, 1], name='generic')
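    # Assumed expected values, following the 50/50 roll weights checked in the
    # static_transition test above.
    wts_exp = pd.DataFrame([[0.0, 0.5], [0.5, 0.0], [0.5, 0.5]],
                           index=midx, columns=cols)
    assert_frame_equal(wts, wts_exp)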
#%%
import numpy as np
import pandas as pd
import altair as alt
# First generate plot of observed values
observed = pd.read_csv('../processed/Frederikse2020_observed_GMSL_from_1900.csv')
observed['year'] = pd.to_datetime(observed['year'], format='%Y')
#%%
# Plot only the mean values
chart = alt.Chart(observed).encode(
x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
y=alt.Y(field='observed_GMSL_mean', type='quantitative', title='observed change from average sea-level in 1900 [mm]'),
tooltip=[alt.Tooltip(field='year', type='temporal', format='%Y', title='year'),
alt.Tooltip(field='observed_GMSL_mean', type='quantitative', title='mean [mm]', format='0.1f')]
).properties(width='container', height=300)
a = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
y='observed_GMSL_lower:Q',
y2='observed_GMSL_upper:Q').properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
layer = alt.layer(a, l, p)
layer.save('observed_GMSL.json')
# %%
# Do the same for estimated contributors
contrib = pd.read_csv('../processed/Frederikse2020_contributor_GMSL_from_1900.csv')
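# A plausible continuation (column names below are assumptions, not from the
# original script) would mirror the observed-GMSL chart for each contributor:
# contrib['year'] = pd.to_datetime(contrib['year'], format='%Y')
# chart = alt.Chart(contrib).mark_line().encode(
#     x=alt.X('year:T', timeUnit='year', title='year'),
#     y=alt.Y('contributor_GMSL_mean:Q', title='estimated contribution [mm]'),
#     color='contributor:N'
# ).properties(width='container', height=300)
# chart.save('contributor_GMSL.json')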
# License: Apache-2.0
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from sklearn.ensemble import RandomForestClassifier
from gators.feature_selection.select_from_model import SelectFromModel
from gators.pipeline.pipeline import Pipeline
from gators.transformers.transformer import Transformer
class MultiplyTransformer(Transformer):
def __init__(self, multiplier):
self.multiplier = multiplier
def fit(self, X, y=None):
return self
def transform(self, X):
return self.multiplier * X
def transform_numpy(self, X):
return self.multiplier * X
class NameTransformer(Transformer):
def fit(self, X, y=None):
self.column_names = [f"{c}_new" for c in X.columns]
self.column_mapping = dict(zip(self.column_names, [[c] for c in X.columns]))
self.column_mapping["D_new"] = "D"
return self
def transform(self, X):
return X.rename(columns=dict(zip(X.columns, self.column_names)))
def transform_numpy(self, X):
return X
@pytest.fixture
def pipeline_example():
X = pd.DataFrame(
[
[1.764, 0.4, 0.979, 2.241],
[1.868, -0.977, 0.95, -0.151],
[-0.103, 0.411, 0.144, 1.454],
[0.761, 0.122, 0.444, 0.334],
],
columns=list("ABCD"),
)
y = pd.Series([0, 1, 0, 1], name="TARGET")
steps = [
MultiplyTransformer(4.0),
MultiplyTransformer(0.5),
NameTransformer(),
]
pipe = Pipeline(steps)
X_expected = pd.DataFrame(
{
"A_new": {0: 3.528, 1: 3.736, 2: -0.206, 3: 1.522},
"B_new": {0: 0.8, 1: -1.954, 2: 0.822, 3: 0.244},
"C_new": {0: 1.958, 1: 1.9, 2: 0.288, 3: 0.888},
"D_new": {0: 4.482, 1: -0.302, 2: 2.908, 3: 0.668},
}
)
return pipe, X, X_expected
@pytest.fixture
def pipeline_with_feature_selection_example():
X = pd.DataFrame(
[
[1.764, 0.4, 0.979, 2.241],
[1.868, -0.977, 0.95, -0.151],
[-0.103, 0.411, 0.144, 1.454],
[0.761, 0.122, 0.444, 0.334],
],
columns=list("ABCD"),
)
y = pd.Series([0, 1, 0, 1], name="TARGET")
model = RandomForestClassifier(n_estimators=6, max_depth=4, random_state=0)
steps = [
MultiplyTransformer(4.0),
MultiplyTransformer(0.5),
NameTransformer(),
SelectFromModel(model=model, k=3),
]
pipe = Pipeline(steps).fit(X, y)
X_expected = pd.DataFrame(
{
"A_new": {0: 3.528, 1: 3.736, 2: -0.206, 3: 1.522},
"B_new": {0: 0.8, 1: -1.954, 2: 0.822, 3: 0.244},
"C_new": {0: 1.958, 1: 1.9, 2: 0.288, 3: 0.888},
"D_new": {0: 4.482, 1: -0.302, 2: 2.908, 3: 0.668},
}
)
return pipe, X, X_expected
@pytest.fixture
def pipeline_with_model_example():
X = pd.DataFrame(
[
[1.764, 0.4, 0.979, 2.241],
[1.868, -0.977, 0.95, -0.151],
[-0.103, 0.411, 0.144, 1.454],
[0.761, 0.122, 0.444, 0.334],
],
columns=list("ABCD"),
)
    y = pd.Series([0, 1, 0, 1], name="TARGET")