| prompt | completion | api |
|---|---|---|
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for visualizing the results
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import os
import seaborn
import pandas as pd
from matplotlib import colors as mcolors
seaborn.set_style("darkgrid")
def draw_embedding(embs, names, resultpath, algos, show_label):
"""Function to draw the embedding.
Args:
embs (matrix): Two dimesnional embeddings.
names (list):List of string name.
resultpath (str):Path where the result will be save.
algos (str): Name of the algorithms which generated the algorithm.
show_label (bool): If True, prints the string names of the entities and relations.
"""
print("\t drawing figure!")
pos = {}
node_color_mp = {}
unique_ent = set(names)
colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
tot_col = len(colors)
j = 0
for i, e in enumerate(unique_ent):
node_color_mp[e] = colors[j]
j += 1
if j >= tot_col:
j = 0
G = nx.Graph()
hm_ent = {}
for i, ent in enumerate(names):
hm_ent[i] = ent
G.add_node(i)
pos[i] = embs[i]
colors = []
for n in list(G.nodes):
colors.append(node_color_mp[hm_ent[n]])
plt.figure()
nodes_draw = nx.draw_networkx_nodes(G,
pos,
node_color=colors,
node_size=50)
nodes_draw.set_edgecolor('k')
if show_label:
nx.draw_networkx_labels(G, pos, font_size=8)
if not os.path.exists(resultpath):
os.mkdir(resultpath)
files = os.listdir(resultpath)
file_no = len(
[c for c in files if algos + '_embedding_plot' in c])
filename = algos + '_embedding_plot_' + str(file_no) + '.png'
plt.savefig(str(resultpath / filename), bbox_inches='tight', dpi=300)
# plt.show()
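# Illustrative usage sketch (not part of the original module): draw ten random
# 2-D points with made-up entity names. The output directory and algorithm
# name below are assumptions for demonstration only.
def _demo_draw_embedding():
    from pathlib import Path
    demo_embs = np.random.rand(10, 2)
    demo_names = ['ent_%d' % (i % 4) for i in range(10)]
    draw_embedding(demo_embs, demo_names, Path('./figures'), 'TransE', show_label=False)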
def draw_embedding_rel_space(h_emb,
r_emb,
t_emb,
h_name,
r_name,
t_name,
resultpath,
algos,
show_label):
"""Function to draw the embedding in relation space.
Args:
h_emb (matrix): Two dimesnional embeddings of head.
r_emb (matrix): Two dimesnional embeddings of relation.
t_emb (matrix): Two dimesnional embeddings of tail.
h_name (list):List of string name of the head.
r_name (list):List of string name of the relation.
t_name (list):List of string name of the tail.
resultpath (str):Path where the result will be save.
algos (str): Name of the algorithms which generated the algorithm.
show_label (bool): If True, prints the string names of the entities and relations.
"""
print("\t drawing figure!")
pos = {}
node_color_mp_ent = {}
node_color_mp_rel = {}
unique_ent = set(h_name) | set(t_name)
unique_rel = set(r_name)
colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
tot_col = len(colors)
j = 0
for i, e in enumerate(unique_ent):
node_color_mp_ent[e] = colors[j]
j += 1
if j >= tot_col:
j = 0
tot_col = len(colors)
j = 0
for i, r in enumerate(unique_rel):
node_color_mp_rel[r] = colors[j]
j += 1
if j >= tot_col:
j = 0
G = nx.DiGraph()
idx = 0
head_colors = []
rel_colors = []
tail_colors = []
head_nodes = []
tail_nodes = []
rel_nodes = []
for i in range(len(h_name)):
G.add_edge(idx, idx + 1)
G.add_edge(idx + 1, idx + 2)
head_nodes.append(idx)
rel_nodes.append(idx + 1)
tail_nodes.append(idx + 2)
head_colors.append(node_color_mp_ent[h_name[i]])
rel_colors.append(node_color_mp_rel[r_name[i]])
tail_colors.append(node_color_mp_ent[t_name[i]])
pos[idx] = h_emb[i]
pos[idx + 1] = r_emb[i]
pos[idx + 2] = t_emb[i]
idx += 3
plt.figure()
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=head_nodes,
node_color=head_colors,
node_shape='o',
node_size=50)
nodes_draw.set_edgecolor('k')
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=rel_nodes,
node_color=rel_colors,
node_size=50,
node_shape='D')
nodes_draw.set_edgecolor('k')
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=tail_nodes,
node_color=tail_colors,
node_shape='*',
node_size=50)
nodes_draw.set_edgecolor('k')
if show_label:
nx.draw_networkx_labels(G, pos, font_size=8)
nx.draw_networkx_edges(G, pos, arrows=True, width=0.5, alpha=0.5)
if not os.path.exists(resultpath):
os.mkdir(resultpath)
files = os.listdir(resultpath)
file_no = len(
[c for c in files if algos + '_embedding_plot' in c])
plt.savefig(str(resultpath / (algos + '_embedding_plot_' + str(file_no) + '.png')), bbox_inches='tight', dpi=300)
# plt.show()
class Visualization(object):
"""Class to aid in visualizing the results and embddings.
Args:
model (object): Model object
vis_opts (list): Options for visualization.
sess (object): TensorFlow session object, initialized by the trainer.
Examples:
>>> from pykg2vec.utils.visualization import Visualization
>>> from pykg2vec.utils.trainer import Trainer
>>> from pykg2vec.core.TransE import TransE
>>> model = TransE()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
>>> viz = Visualization(model=model)
>>> viz.plot_train_result()
"""
def __init__(self, model=None, vis_opts=None, sess=None):
self.sess = sess
if vis_opts:
self.ent_only_plot = vis_opts["ent_only_plot"]
self.rel_only_plot = vis_opts["rel_only_plot"]
self.ent_and_rel_plot = vis_opts["ent_and_rel_plot"]
else:
self.ent_only_plot = False
self.rel_only_plot = False
self.ent_and_rel_plot = False
self.model = model
self.algo_list = ['Complex', 'ConvE','HoLE', 'DistMult', 'DistMult2', 'KG2E_EL','KG2E_KL',
'KGMeta', 'NTN', 'ProjE_pointwise', 'Rescal',
'RotatE', 'SLM', 'SME_Bilinear','SME_Linear', 'TransD', 'TransE', 'TransH',
'TransM', 'TransR', 'TuckER']
self.h_name = []
self.r_name = []
self.t_name = []
self.h_emb = []
self.r_emb = []
self.t_emb = []
self.h_proj_emb = []
self.r_proj_emb = []
self.t_proj_emb = []
if self.model is not None:
self.validation_triples_ids = self.model.config.knowledge_graph.read_cache_data('triplets_valid')
self.idx2entity = self.model.config.knowledge_graph.read_cache_data('idx2entity')
self.idx2relation = self.model.config.knowledge_graph.read_cache_data('idx2relation')
self.get_idx_n_emb()
def get_idx_n_emb(self):
"""Function to get the integer ids and the embedding."""
idx = np.random.choice(len(self.validation_triples_ids), self.model.config.disp_triple_num)
triples = []
for i in range(len(idx)):
triples.append(self.validation_triples_ids[idx[i]])
for t in triples:
self.h_name.append(self.idx2entity[t.h])
self.r_name.append(self.idx2relation[t.r])
self.t_name.append(self.idx2entity[t.t])
emb_h, emb_r, emb_t = self.model.get_embed(t.h, t.r, t.t, self.sess)
self.h_emb.append(emb_h)
self.r_emb.append(emb_r)
self.t_emb.append(emb_t)
if self.ent_and_rel_plot:
try:
emb_h, emb_r, emb_t = self.model.get_proj_embed(t.h, t.r, t.t, self.sess)
self.h_proj_emb.append(emb_h)
self.r_proj_emb.append(emb_r)
self.t_proj_emb.append(emb_t)
except Exception as e:
print(e.args)
def plot_embedding(self,
resultpath=None,
algos=None,
show_label=False,
disp_num_r_n_e = 20):
"""Function to plot the embedding.
Args:
resultpath (str): Path where the result will be saved.
show_label (bool): If True, will display the labels.
algos (str): Name of the algorithm that generated the embedding.
disp_num_r_n_e (int): Total number of entities to display for head, tail and relation.
"""
if not self.model:
raise NotImplementedError('Please provide a model!')
if self.ent_only_plot:
x = np.concatenate((self.h_emb, self.t_emb), axis=0)
ent_names = np.concatenate((self.h_name, self.t_name), axis=0)
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
x = np.asarray(x)
ent_names = np.asarray(ent_names)
draw_embedding(x, ent_names, resultpath, algos + '_entity_plot', show_label)
if self.rel_only_plot:
x = self.r_emb
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
draw_embedding(x, self.r_name, resultpath, algos + '_rel_plot', show_label)
if self.ent_and_rel_plot:
length = len(self.h_proj_emb)
x = np.concatenate((self.h_proj_emb, self.r_proj_emb, self.t_proj_emb), axis=0)
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
h_embs = x[:length, :]
r_embs = x[length:2 * length, :]
t_embs = x[2 * length:3 * length, :]
draw_embedding_rel_space(h_embs[:disp_num_r_n_e],
r_embs[:disp_num_r_n_e],
t_embs[:disp_num_r_n_e],
self.h_name[:disp_num_r_n_e],
self.r_name[:disp_num_r_n_e],
self.t_name[:disp_num_r_n_e],
resultpath, algos + '_ent_n_rel_plot', show_label)
def plot_train_result(self):
"""Function to plot the training result."""
algo = self.algo_list
path = self.model.config.path_result
result = self.model.config.path_figures
data = [self.model.config.data]
files = os.listdir(str(path))
files_lwcase = [f.lower() for f in files]
for d in data:
df = pd.DataFrame()
for a in algo:
file_no = len([c for c in files_lwcase if a.lower() in c if 'training' in c])
if file_no < 1:
continue
with open(str(path / (a + '_Training_results_' + str(file_no - 1) + '.csv')), 'r') as fh:
df_2 = pd.read_csv(fh)
if df.empty:
df['Epochs'] = df_2['Epochs']
df['Loss'] = df_2['Loss']
df['Algorithm'] = [a] * len(df_2)
else:
df_3 = pd.DataFrame()
df_3['Epochs'] = df_2['Epochs']
df_3['Loss'] = df_2['Loss']
df_3['Algorithm'] = [a] * len(df_2)
frames = [df, df_3]
df = pd.concat(frames)
plt.figure()
ax = seaborn.lineplot(x="Epochs", y="Loss", hue="Algorithm",
markers=True, dashes=False, data=df)
files = os.listdir(str(result))
files_lwcase = [f.lower() for f in files]
file_no = len([c for c in files_lwcase if d.lower() in c if 'training' in c])
plt.savefig(str(result / (d + '_training_loss_plot_' + str(file_no) + '.pdf')), bbox_inches='tight', dpi=300)
# plt.show()
def plot_test_result(self):
"""Function to plot the testing result."""
algo = self.algo_list
path = self.model.config.path_result
result = self.model.config.path_figures
data = [self.model.config.data]
hits = self.model.config.hits
if path is None or algo is None or data is None:
raise NotImplementedError('Please provide valid path, algorithm and dataset!')
files = os.listdir(str(path))
# files_lwcase = [f.lower() for f in files if 'Testing' in f]
# print(files_lwcase)
for d in data:
df = pd.DataFrame()
for a in algo:
file_algo = [c for c in files if a.lower() in c.lower() if 'testing' in c.lower()]
if not file_algo:
continue
with open(str(path / file_algo[-1]), 'r') as fh:
df_2 = pd.read_csv(fh)
if df.empty:
df['Algorithm'] = [a] * len(df_2)
df['Epochs'] = df_2['Epoch']
df['Mean Rank'] = df_2['Mean Rank']
df['Filt Mean Rank'] = df_2['Filtered Mean Rank']
for hit in hits:
df['Hits' + str(hit)] = df_2['Hit-%d Ratio'%hit]
df['Filt Hits' + str(hit)] = df_2['Filtered Hit-%d Ratio'%hit]
else:
df_3 = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 11:51:39 2020
This is best run inside Spyder, not as standalone script.
Author: @hk_nien on Twitter.
"""
import re
import sys
import io
import urllib
import urllib.request
from pathlib import Path
import time
import locale
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import nl_regions
import scipy.signal
import scipy.interpolate
import scipy.integrate
import tools
from g_mobility_data import get_g_mobility_data
from nlcovidstats_data import (
init_data,
DFS,
get_municipalities_by_pop,
load_cumulative_cases,
)
# These delay values are tuned to match the RIVM Rt estimates.
# They represent the delay (days) from infection to report date,
# referencing the report date.
# Extrapolation: constant value.
DELAY_INF2REP = [
('2020-07-01', 7.5),
('2020-09-01', 7),
('2020-09-15', 9),
('2020-10-09', 9),
('2020-11-08', 7),
('2020-12-01', 6.5),
('2021-02-15', 6.5),
('2021-04-05', 4),
('2021-07-06', 4),
('2021-07-15', 5),
('2021-07-23', 4),
('2021-07-30', 4),
('2021-11-04', 4),
('2021-11-11', 4.5),
('2021-11-20', 5),
('2021-11-25', 5),
('2021-12-04', 4.5), # test capacity increased
('2021-12-08', 4), # Speculation...
]
_DOW_CORR_CACHE = {} # keys: dayrange tuples.
def get_dow_correction_rolling(nweeks=7, taper=0.5):
"""Return DoW correction factors for all dates.
Parameters:
- nweeks: number of preceding weeks to use for each date.
- taper: which fraction of old data to taper to lower weight.
Return:
- Series with same timestamp index as cases data.
"""
df, _ = get_region_data('Nederland', lastday=-1, correct_dow=None)
# df = df.iloc[3:-3].copy() # strip edge points without well defined 7d mean.
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
ntaper = int(nweeks*taper + 0.5)
kernel = np.zeros(nweeks*2 + 1)
kernel[-nweeks:] = 1
kernel[-nweeks:-nweeks+ntaper] = np.linspace(1/ntaper, 1-1/ntaper, ntaper)
kernel /= kernel.sum()
df['Dow_factor'] = np.nan
for idow in range(7):
row_select = df.index[df.index.dayofweek == idow]
facs = df.loc[row_select, 'Delta_factor']
n = len(facs)
assert len(facs) > nweeks
mean_factors = np.convolve(facs, kernel, mode='same')
mean_factors[mean_factors == 0] = np.nan
df.loc[row_select, 'Dow_factor'] = 1/mean_factors
df.loc[df.index[:8], 'Dow_factor'] = np.nan
return df['Dow_factor']
def get_dow_correction(dayrange=(-50, -1), verbose=False):
"""Return array with day-of-week correction factors.
- dayrange: days to consider for DoW correction.
- verbose: whether to show plots and print diagnostics.
Return:
- dow_corr_factor: array (7,) with DoW correction (0=Monday).
"""
dayrange = tuple(dayrange)
if dayrange in _DOW_CORR_CACHE and not verbose:
return _DOW_CORR_CACHE[dayrange].copy()
# timestamp index, columns Delta, Delta7r, and others.
df, _ = get_region_data('Nederland', lastday=dayrange[-1], correct_dow=None)
df = df.iloc[:-4] # Discard the last rows that have no correct rolling average.
df = df.iloc[dayrange[0]-dayrange[1]:]
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
# Collect by day of week (0=Monday)
factor_by_dow = np.zeros(7)
for i in range(7):
factor_by_dow[i] = 1 / df.loc[df.index.dayofweek == i, 'Delta_factor'].mean()
factor_by_dow /= factor_by_dow.mean()
df['Delta_est_factor'] = factor_by_dow[df.index.dayofweek]
df['Delta_corrected'] = df['Delta'] * df['Delta_est_factor']
rms_dc = (df['Delta_corrected']/df['Delta7r']).std()
rms_d = df['Delta_factor'].std()
if verbose:
print('DoW effect: deviations from 7-day rolling average.\n'
f' Original: RMS={rms_d:.3g}; after correction: RMS={rms_dc:.3g}')
fig, ax = plt.subplots(tight_layout=True)
ax.plot(df['Delta_factor'], label='Delta')
ax.plot(df['Delta_corrected'] / df['Delta7r'], label='Delta_corrected')
ax.plot(df['Delta_est_factor'], label='Correction factor')
tools.set_xaxis_dateformat(ax, 'Date')
ax.legend()
ax.set_ylabel('Daily cases deviation')
title = 'Day-of-week correction on daily cases'
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.show()
if rms_dc > 0.8*rms_d:
print(f'WARNING: DoW correction for dayrange={dayrange} does not seem to work.\n'
' Abandoning this correction.')
factor_by_dow = np.ones(7)
_DOW_CORR_CACHE[dayrange] = factor_by_dow.copy()
return factor_by_dow
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
correct_dow='r7'):
"""Get case counts and population for one municipality.
It uses the global DFS['mun'], DFS['cases'] dataframe.
Parameters:
- region: region name (see below)
- lastday: last day to include.
- printrows: print this many of the most recent rows
- correct_anomalies: correct known anomalies (hiccups in reporting)
by reassigning cases to earlier dates.
- correct_dow: None, 'r7' (only for extrapolated rolling-7 average)
Special municipalities:
- 'Nederland': all
- 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
holiday regions.
- 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy
- 'P:xx': province
Use data up to lastday.
Return:
- df: dataframe with added columns:
- Delta: daily increase in case count (per capita).
- Delta_dowc: daily increase, day-of-week correction applied
based on national pattern in most recent 7 weeks.
- Delta7r: daily increase as 7-day rolling average
(last 3 days are estimated).
- DeltaSG: daily increase, smoothed with a (15, 2) Savitzky-Golay filter.
- pop: population.
"""
df1, npop = nl_regions.select_cases_region(DFS['cases'], region)
# df1 will have index 'Date_of_report', columns:
# 'Total_reported', 'Hospital_admission', 'Deceased'
assert correct_dow in [None, 'r7']
if lastday < -1 or lastday > 0:
df1 = df1.iloc[:lastday+1]
if len(df1) == 0:
raise ValueError(f'No data for region={region!r}.')
# nc: number of cases
nc = df1['Total_reported'].diff()
if printrows > 0:
print(nc[-printrows:])
nc.iat[0] = 0
df1['Delta'] = nc/npop
if correct_anomalies:
_correct_delta_anomalies(df1)
nc = df1['Delta'] * npop
nc7 = nc.rolling(7, center=True).mean()
nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
nc7a = nc7.to_numpy()
# last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
# get an estimated trend and use exponential growth or decay
# for filling the data.
if correct_dow == 'r7':
# mean number at t=-1.5 days
dow_correction = get_dow_correction((lastday-49, lastday)) # (7,) array
df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
else:
nc1 = nc.iloc[-4:].mean() # mean number at t=-1.5 days
log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
# 1st 3 elements are NaN
nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
df1['Delta7r'] = nc7/npop
df1['DeltaSG'] = scipy.signal.savgol_filter(
nc/npop, 15, 2, mode='interp')
return df1, npop
def _correct_delta_anomalies(df):
"""Apply anomaly correction to 'Delta' column.
Store original values to 'Delta_orig' column.
Pull data from DFS['anomalies']
"""
dfa = DFS['anomalies']
df['Delta_orig'] = df['Delta'].copy()
dt_tol = pd.Timedelta(12, 'h') # tolerance on date matching
match_date = lambda dt: abs(df.index - dt) < dt_tol
preserve_n = True
for (date, data) in dfa.iterrows():
if date == '2021-02-08':
print('@foo')
f = data['fraction']
dt = data['days_back']
dn = df.loc[match_date(date), 'Delta_orig'] * f
if len(dn) == 0:
print(f'Anomaly correction: no match for {date}; skipping.')
continue
assert len(dn) == 1
dn = dn[0]
df.loc[match_date(date + pd.Timedelta(dt, 'd')), 'Delta'] += dn
if dt != 0:
df.loc[match_date(date), 'Delta'] -= dn
else:
preserve_n = False
if preserve_n:
assert np.isclose(df["Delta"].sum(), df["Delta_orig"].sum(), rtol=1e-6, atol=0)
else:
delta = df["Delta"].sum() - df["Delta_orig"].sum()
print(f'Note: case count increased by {delta*17.4e6:.0f} cases due to anomalies.')
def construct_Dfunc(delays, plot=False):
"""Return interpolation functions fD(t) and fdD(t).
fD(t) is the delay between infection and reporting at reporting time t.
fdD(t) is its derivative.
Parameter:
- delays: tuples (datetime_report, delay_days). Extrapolation is at
constant value.
- plot: whether to generate a plot.
Return:
- fD: interpolation function for D(t) with t in nanoseconds since epoch.
- fdD: interpolation function for dD/dt.
(taking time in ns but returning dD per day.)
- delay_str: delay string e.g. '7' or '7-9'
"""
ts0 = [float(pd.to_datetime(x[0]).to_datetime64()) for x in delays]
Ds0 = [float(x[1]) for x in delays]
if len(delays) == 1:
# prevent interp1d complaining.
ts0 = [ts0[0], ts0[0]+1e9]
Ds0 = np.concatenate([Ds0, Ds0])
# delay function as linear interpolation;
# nanosecond timestamps as t value.
fD0 = scipy.interpolate.interp1d(
ts0, Ds0, kind='linear', bounds_error=False,
fill_value=(Ds0[0], Ds0[-1])
)
# construct derivative dD/dt, smoothen out
day = 1e9*86400 # one day in nanoseconds
ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
fdD = scipy.interpolate.interp1d(
ts, dDs, 'linear', bounds_error=False,
fill_value=(dDs[0], dDs[-1]))
# reconstruct D(t) to be consistent with the smoothened derivative.
Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
fD = scipy.interpolate.interp1d(
ts, Ds, 'linear', bounds_error=False,
fill_value=(Ds[0], Ds[-1]))
Dmin, Dmax = np.min(Ds0), np.max(Ds0)
if Dmin == Dmax:
delay_str = f'{Dmin:.0f}'
else:
delay_str = f'{Dmin:.0f}-{Dmax:.0f}'
if plot:
fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
tsx = np.linspace(
ts[0],
int(pd.to_datetime('now').to_datetime64())
)
ax.plot(pd.to_datetime(tsx.astype(np.int64)), fD(tsx))
ax.set_ylabel('Vertraging (dagen)')
tools.set_xaxis_dateformat(ax, 'Rapportagedatum')
title = 'Vertraging = t_rapportage - t_infectie - t_generatie/2'
fig.canvas.set_window_title(title)
ax.set_title(title)
fig.show()
return fD, fdD, delay_str
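# Illustrative sketch (not in the original script): evaluate the delay function
# on one arbitrary report date; the date below is only an example.
def _demo_construct_Dfunc():
    fD, fdD, delay_str = construct_Dfunc(DELAY_INF2REP)
    t_ns = float(pd.to_datetime('2021-01-15').to_datetime64())
    print(f'Delay on 2021-01-15: {float(fD(t_ns)):.2f} d '
          f'(range {delay_str}), dD/dt = {float(fdD(t_ns)):.3f} per day')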
def estimate_Rt_df(r, delay=9, Tc=4.0):
"""Return Rt data, assuming delay infection-reporting.
- r: Series with smoothed new reported cases.
(e.g. 7-day rolling average or other smoothed data).
- delay: assume delay days from infection to positive report.
alternatively: list of (timestamp, delay) tuples if the delay varies over time.
The timestamps refer to the date of report.
- Tc: assume generation interval.
Return:
- DataFrame with columns 'Rt' and 'delay'.
"""
if not hasattr(delay, '__getitem__'):
# simple delay - attach data to index with proper offset
log_r = np.log(r.to_numpy()) # shape (n,)
assert len(log_r.shape) == 1
log_slope = (log_r[2:] - log_r[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
index = r.index[1:-1] - pd.Timedelta(delay, unit='days')
Rdf = pd.DataFrame(
dict(Rt=pd.Series(index=index, data=Rt, name='Rt'))
)
Rdf['delay'] = delay
else:
# the hard case: delay varies over time.
# if ri is the rate of infections, tr the reporting date, and D
# the delay, then:
# ri(tr-D(tr)) = r(tr) / (1 - dD/dt)
fD, fdD, _ = construct_Dfunc(delay)
# note: timestamps in nanoseconds since epoch, rates in 'per day' units.
day_ns = 86400e9
tr = r.index.astype(int)
ti = tr - fD(tr) * day_ns
ri = r.to_numpy() / (1 - fdD(tr))
# now get log-derivative the same way as above
log_ri = np.log(np.where(ri==0, np.nan, ri))
log_slope = (log_ri[2:] - log_ri[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
# build series with timestamp index
# (Note: int64 must be specified explicitly in Windows, 'int' will be
# int32.)
Rt_series = pd.Series(
data=Rt, name='Rt',
index=pd.to_datetime(ti[1:-1].astype(np.int64))
)
Rdf = pd.DataFrame(dict(Rt=Rt_series))
Rdf['delay'] = fD(tr[1:-1])
return Rdf
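# Illustrative sketch (not in the original script): for a purely exponential
# case series r(t) = exp(k*t), every interior day should give Rt = exp(Tc*k).
def _demo_estimate_Rt_df():
    k = 0.05  # assumed daily growth rate for the demo
    idx = pd.date_range('2021-01-01', periods=30, freq='D')
    r = pd.Series(np.exp(k * np.arange(30)), index=idx)
    rdf = estimate_Rt_df(r, delay=9, Tc=4.0)
    print(rdf['Rt'].round(3).unique())  # expect ~exp(4*0.05) = 1.221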
def get_t2_Rt(ncs, delta_t, i0=-3):
"""Return most recent doubling time and Rt, from case series"""
# exponential fit
t_gen = 4.0 # generation time (d)
t_double = delta_t / np.log2(ncs.iloc[i0]/ncs.iloc[i0-delta_t])
Rt = 2**(t_gen / t_double)
return t_double, Rt
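# Illustrative sketch (not in the original script): a case series that doubles
# every 7 days should give t_double = 7 and Rt = 2**(4/7), roughly 1.49.
def _demo_get_t2_Rt():
    ncs = pd.Series(2.0 ** (np.arange(30) / 7))
    t_double, Rt = get_t2_Rt(ncs, delta_t=7)
    print(f't_double = {t_double:.1f} d, Rt = {Rt:.2f}')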
def add_labels(ax, labels, xpos, mindist_scale=1.0, logscale=True):
"""Add labels, try to have them avoid bumping.
- labels: list of tuples (y, txt)
- mindist_scale: set to >1 or <1 to tweak label spacing.
"""
from scipy.optimize import fmin_cobyla
ymin, ymax = ax.get_ylim()
if logscale:
mindist = np.log10(ymax/ymin)*0.025*mindist_scale
else:
mindist = (ymax - ymin)*0.025*mindist_scale
labels = sorted(labels)
# log positions, sorted
if logscale:
Ys = np.log10([l[0] for l in labels])
else:
Ys = np.array([l[0] for l in labels])
n = len(Ys)
# Distance matrix: D @ y = distances between adjacent y values
D = np.zeros((n-1, n))
for i in range(n-1):
D[i, i] = -1
D[i, i+1] = 1
def cons(Y):
ds = D @ Y
errs = np.array([ds - mindist, ds])
#print(f'{np.around(errs, 2)}')
return errs.reshape(-1)
# optimization function
def func(Y):
return ((Y - Ys)**2).sum()
new_Ys = fmin_cobyla(func, Ys, cons, catol=mindist*0.05)
for Y, (_, txt) in zip(new_Ys, labels):
y = 10**Y if logscale else Y
ax.text(xpos, y, txt, verticalalignment='center')
def _zero2nan(s):
"""Return copy of array/series s, negative/zeros replaced by NaN."""
sc = s.copy()
sc[s <= 0] = np.nan
return sc
def _add_event_labels(ax, tmin, tmax, with_ribbons=True, textbox=False, bottom=True,
flagmatch='RGraph'):
"""Add event labels and ribbons to axis (with date on x-axis).
- ax: axis object
- tmin, tmax: time range to assume for x axis.
- textbox: whether to draw text in a semi-transparent box.
- bottom: whether to put labels at the bottom rather than top.
- flagmatch: which flags to match (regexp).
"""
ymin, ymax = ax.get_ylim()
y_lab = ymin if bottom else ymax
ribbon_yspan = (ymax - ymin)*0.35
ribbon_hgt = ribbon_yspan*0.1 # ribbon height
ribbon_ystep = ribbon_yspan*0.2
df_events = DFS['events']
ribbon_colors = ['#ff0000', '#cc7700'] * 10
if df_events is not None:
i_res = 0
for _, (res_t, res_t_end, res_d, flags) in df_events.reset_index().iterrows():
if not (tmin <= res_t <= tmax):
continue
if flags and not re.match(flagmatch, flags):
continue
res_d = res_d.replace('\\n', '\n')
# note; with \n in text, alignment gets problematic.
txt = ax.text(res_t, y_lab, f' {res_d}', rotation=90, horizontalalignment='center',
verticalalignment='bottom' if bottom else 'top',
fontsize=8)
if textbox:
txt.set_bbox(dict(facecolor='white', alpha=0.4, linewidth=0))
if pd.isna(res_t_end):
"""A collections of functions to facilitate
analysis of HiC data based on the cooler and cooltools
interfaces."""
import warnings
from typing import Tuple, Dict, Callable
import cooltools.expected
import cooltools.snipping
import pandas as pd
import bioframe
import cooler
import pairtools
import numpy as np
import multiprocess
from .snipping_lib import flexible_pileup
# define type aliases
CisTransPairs = Dict[str, pd.DataFrame]
PairsSamples = Dict[str, CisTransPairs]
# define functions
def get_expected(
clr: cooler.Cooler, arms: pd.DataFrame, proc: int = 20, ignore_diagonals: int = 2
) -> pd.DataFrame:
"""Takes a clr file handle and a pandas dataframe
with chromosomal arms (generated by get_arms_hg19()) and calculates
the expected read number at a certain genomic distance.
The proc parameter defines how many processes should be used
for the calculations. ignore_diagonals specifies how many diagonals
to ignore (0 means only the main diagonal, 1 means the main diagonal
and the two flanking diagonals, and so on)."""
with multiprocess.Pool(proc) as pool:
expected = cooltools.expected.diagsum(
clr,
tuple(arms.itertuples(index=False, name=None)),
transforms={"balanced": lambda p: p["count"] * p["weight1"] * p["weight2"]},
map=pool.map,
ignore_diags=ignore_diagonals,
)
# construct a single dataframe for all regions (arms)
expected_df = (
expected.groupby(["region", "diag"])
.aggregate({"n_valid": "sum", "count.sum": "sum", "balanced.sum": "sum"})
.reset_index()
)
# account for different number of valid bins in diagonals
expected_df["balanced.avg"] = expected_df["balanced.sum"] / expected_df["n_valid"]
return expected_df
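# Illustrative usage sketch (not part of the original module). The cooler URI
# below is a placeholder, not a file shipped with this code:
#     clr = cooler.Cooler("data/sample.mcool::/resolutions/10000")
#     arms = get_arms_hg19()
#     expected_df = get_expected(clr, arms, proc=4, ignore_diagonals=2)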
def get_arms_hg19() -> pd.DataFrame:
"""Downloads the coordinates for chromosomal arms of the
genome assembly hg19 and returns it as a dataframe."""
# download chromosomal sizes
chromsizes = bioframe.fetch_chromsizes("hg19")
# download centromers
centromeres = bioframe.fetch_centromeres("hg19")
centromeres.set_index("chrom", inplace=True)
centromeres = centromeres.mid
# define chromosomes that are well defined (filter out unassigned contigs)
good_chroms = list(chromsizes.index[:23])
# construct arm regions (for each chromosome from 0 to the centromere and from the centromere to the end)
arms = [
arm
for chrom in good_chroms
for arm in (
(chrom, 0, centromeres.get(chrom, 0)),
(chrom, centromeres.get(chrom, 0), chromsizes.get(chrom, 0)),
)
]
# construct dataframe out of arms
arms = pd.DataFrame(arms, columns=["chrom", "start", "end"])
return arms
def _assign_supports(features, supports):
"""assigns supports to entries in snipping windows.
Workaround for bug in cooltools 0.2.0 that duplicate
supports are not handled correctly. Copied from cooltools.common.assign_regions"""
index_name = features.index.name # Store the name of index
features = (
features.copy().reset_index()
) # Store the original features' order as a column with original index
if "chrom" in features.columns:
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=["chrom", "start", "end"],
cols2=["chrom", "start", "end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
"chrom_1",
"start_1",
"end_1",
] # To filter out duplicates later
overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values("overlap_length", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features["region"] = overlap["name_2"]
if "chrom1" in features.columns:
for idx in ("1", "2"):
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
cols2=[f"chrom", f"start", f"end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
f"chrom{idx}_1",
f"start{idx}_1",
f"end{idx}_1",
] # To filter out duplicates later
overlap[f"overlap_length{idx}"] = (
overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
)
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values(f"overlap_length{idx}", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features[f"region{idx}"] = overlap["name_2"]
# Form a single column with region names where region1 == region2, and np.nan in other cases:
features["region"] = np.where(
features["region1"] == features["region2"], features["region1"], np.nan
)
features = features.drop(
["region1", "region2"], axis=1
) # Remove unnecessary columns
features = features.set_index(
index_name if not index_name is None else "index"
) # Restore the original index
features.index.name = index_name # Restore original index title
return features
def assign_regions(
window: int,
binsize: int,
chroms: pd.Series,
positions: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is givin the the chroms series and the positions series."""
# construct windows from the passed chromosomes and positions
snipping_windows = cooltools.snipping.make_bin_aligned_windows(
binsize, chroms.values, positions.values, window
)
# assign chromosomal arm to each position
snipping_windows = _assign_supports(snipping_windows, bioframe.parse_regions(arms))
return snipping_windows
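# Illustrative usage sketch (not part of the original module): build 10 kb-binned
# windows of +/- 50 kb around two made-up positions on chr1.
#     arms = get_arms_hg19()
#     windows = assign_regions(window=50_000, binsize=10_000,
#                              chroms=pd.Series(["chr1", "chr1"]),
#                              positions=pd.Series([1_000_000, 2_000_000]),
#                              arms=arms)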
def assign_regions_2d(
window: int,
binsize: int,
chroms1: pd.Series,
positions1: pd.Series,
chroms2: pd.Series,
positions2: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location pairs.
Window specifies the window size for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is given by the chroms1 and chroms2 series as well as the
positions1 and positions2 series."""
# construct windows from the passed chromosomes 1 and positions 1
windows1 = assign_regions(window, binsize, chroms1, positions1, arms)
windows1.columns = [str(i) + "1" for i in windows1.columns]
# construct windows from the passed chromosomes 2 and positions 2
windows2 = assign_regions(window, binsize, chroms2, positions2, arms)
windows2.columns = [str(i) + "2" for i in windows2.columns]
# concatenate windows
windows = pd.concat((windows1, windows2), axis=1)
# filter for mapping to different regions
windows_final = windows.loc[windows["region1"] == windows["region2"], :]
# subset data and rename regions
windows_small = windows_final[
["chrom1", "start1", "end1", "chrom2", "start2", "end2", "region1"]
]
windows_small.columns = [
"chrom1",
"start1",
"end1",
"chrom2",
"start2",
"end2",
"region",
]
return windows_small
def do_pileup_obs_exp(
clr: cooler.Cooler,
expected_df: pd.DataFrame,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle, an expected dataframe
constructed by getExpected, snipping windows constructed
by assignRegions and performs a pileup on all these regions
based on the obs/exp value. Returns a numpy array
that contains averages of all selected regions.
The collapse parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
region_frame = get_regions_from_snipping_windows(expected_df)
oe_snipper = cooltools.snipping.ObsExpSnipper(
clr, expected_df, regions=bioframe.parse_regions(region_frame)
)
# set warnings filter to ignore RuntimeWarnings since cooltools
# does not check whether there are inf or 0 values in
# the expected dataframe
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
with multiprocess.Pool(proc) as pool:
# extract a matrix of obs/exp average values for each snipping_window
oe_pile = cooltools.snipping.pileup(
snipping_windows, oe_snipper.select, oe_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile = np.nanmean(oe_pile[:, :, :], axis=2)
return collapsed_pile
return oe_pile
def do_pileup_iccf(
clr: cooler.Cooler,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle and snipping windows constructed
by assignRegions and performs a pileup on all these regions
based on the corrected HiC counts. Returns a numpy array
that contains averages of all selected regions. The collapse
parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
# get regions from snipping windows
region_frame = get_regions_from_snipping_windows(snipping_windows)
iccf_snipper = cooltools.snipping.CoolerSnipper(
clr, regions=bioframe.parse_regions(region_frame)
)
with multiprocess.Pool(proc) as pool:
iccf_pile = cooltools.snipping.pileup(
snipping_windows, iccf_snipper.select, iccf_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile_plus = np.nanmean(iccf_pile[:, :, :], axis=2)
return collapsed_pile_plus
return iccf_pile
def sliding_diamond(
array: np.ndarray, side_len: int = 6, center_x: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Will slide a diamond of side length 'sideLen'
down the diagonal of the passed array and return
the average values for each position and
the relative position of each value with respect
to the center of the array (in Bin units)"""
# initialize accumulators for diamond value and x-position
diamond_accumulator = list()
bin_accumulator = list()
if side_len % 2 == 0:
half_window = side_len
for i in range(0, (array.shape[0] - half_window + 1)):
# extract diamond
diamond_array = array[i : (i + half_window), i : (i + half_window)]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i,
(i + half_window),
)
)
)
else:
half_window = side_len // 2
for i in range(half_window, (array.shape[0] - half_window)):
# extract diamond
diamond_array = array[
i - half_window : (i + half_window) + 1,
i - half_window : (i + half_window) + 1,
]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i - half_window,
(i + half_window) + 1,
)
)
)
if center_x:
x_out = np.array(bin_accumulator - np.median(bin_accumulator))
else:
x_out = np.array(bin_accumulator)
return (x_out, np.array(diamond_accumulator))
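# Illustrative sketch (not part of the original module): slide a 2x2 diamond
# down the diagonal of a small 4x4 matrix.
def _demo_sliding_diamond():
    arr = np.arange(16, dtype=float).reshape(4, 4)
    x, y = sliding_diamond(arr, side_len=2, center_x=True)
    print(x)  # relative bin positions: [-1.  0.  1.]
    print(y)  # mean of each 2x2 diagonal block: [ 2.5  7.5 12.5]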
def load_pairs(path: str) -> pd.DataFrame:
"""Function to load a .pairs or .pairsam file
into a pandas dataframe.
This only works for relatively small files!"""
# get handles for header and pairs_body
header, pairs_body = pairtools._headerops.get_header(
pairtools._fileio.auto_open(path, "r")
)
# extract column names from header
cols = pairtools._headerops.extract_column_names(header)
# read data into dataframe
frame = pd.read_csv(pairs_body, sep="\t", names=cols)
return frame
def down_sample_pairs(
sample_dict: PairsSamples, distance: int = 10 ** 4
) -> PairsSamples:
"""Will downsample cis and trans reads in sampleDict to contain
as many combined cis and trans reads as the sample with the lowest readnumber of the
specified distance."""
# initialize output dictionary
out_dict = {sample: {} for sample in sample_dict}
for sample in sample_dict.keys():
# create temporary dataframes
cis_temp = sample_dict[sample]["cis"]
cis_temp["rType"] = "cis"
trans_temp = sample_dict[sample]["trans"]
trans_temp["rType"] = "trans"
# concatenate them and store in outdict
out_dict[sample]["all"] = pd.concat((cis_temp, trans_temp))
# filter on distance
out_dict[sample]["all"] = out_dict[sample]["all"].loc[
(out_dict[sample]["all"]["pos2"] - out_dict[sample]["all"]["pos1"])
> distance,
:,
]
# get the minimum number of reads
min_reads = min([len(i["all"]) for i in out_dict.values()])
# do the downsampling and split into cis and trans
for sample in out_dict.keys():
out_dict[sample]["all"] = out_dict[sample]["all"].sample(n=min_reads)
out_dict[sample]["cis"] = out_dict[sample]["all"].loc[
out_dict[sample]["all"]["rType"] == "cis", :
]
out_dict[sample]["trans"] = out_dict[sample]["all"].loc[
out_dict[sample]["all"]["rType"] == "trans", :
]
# get rid of all reads
out_dict[sample].pop("all")
return out_dict
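# Illustrative sketch (not part of the original module): two samples with
# unequal read numbers are downsampled to the same combined cis+trans count.
def _demo_down_sample_pairs():
    rng = np.random.default_rng(0)
    def fake_pairs(n):
        return pd.DataFrame({"pos1": rng.integers(0, 10 ** 6, n),
                             "pos2": rng.integers(2 * 10 ** 6, 3 * 10 ** 6, n)})
    samples = {"wt": {"cis": fake_pairs(500), "trans": fake_pairs(500)},
               "mut": {"cis": fake_pairs(200), "trans": fake_pairs(200)}}
    downsampled = down_sample_pairs(samples, distance=10 ** 4)
    print({k: len(v["cis"]) + len(v["trans"]) for k, v in downsampled.items()})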
def pile_to_frame(pile: np.ndarray) -> pd.DataFrame:
"""Takes a pile of pileup windows produced
by doPileupsObsExp/doPileupsICCF (with collapse set to False;
this is numpy ndarray with the following dimensions:
pile.shape = [windowSize, windowSize, windowNumber])
and arranges them as a dataframe with the pixels of the
pile flattened into columns and each individual window
being a row.
Window1: | Pixel 1 | Pixel 2 | Pixel3| ...
Window2: | Pixel 1 | Pixel 2 | Pixel3| ...
Window3: | Pixel 1 | Pixel 2 | Pixel3| ...
"""
return pd.DataFrame(
pile.flatten().reshape(pile.shape[0] ** 2, pile.shape[2])
).transpose()
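# Illustrative sketch (not part of the original module): three 2x2 windows
# become a dataframe with one row per window and one column per pixel.
def _demo_pile_to_frame():
    pile = np.random.rand(2, 2, 3)   # windowSize=2, windowNumber=3
    frame = pile_to_frame(pile)
    assert frame.shape == (3, 4)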
def get_diag_indices(arr: np.ndarray) -> list:
"""Helper function that returns the indices of the diagonal
of a given array into a flattened representation of the array.
For example, the 3 by 3 array:
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
would have diagonal indices [0, 4, 8].
"""
assert arr.shape[0] == arr.shape[1], "Please supply a square array!"
shape = arr.shape[0]
return [
i + index for index, i in enumerate(range(0, shape ** 2 - shape + 1, shape))
]
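# Illustrative sketch (not part of the original module): the 3x3 example from
# the docstring yields the flattened diagonal indices [0, 4, 8].
def _demo_get_diag_indices():
    arr = np.arange(9).reshape(3, 3)
    assert get_diag_indices(arr) == [0, 4, 8]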
def get_pairing_score(
clr: cooler.Cooler,
windowsize: int = 4 * 10 ** 4,
func: Callable = np.mean,
regions: pd.DataFrame = pd.DataFrame(),
# -*- coding: utf-8 -*-
"""model_bnb_h.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LubfQy8-34FekTlgdarShQ5MUnXa328i
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import math
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings('ignore')
data_path = '/content/Binance_' + 'BNB' + 'USDT_1h.csv'
data_path
df = pd.read_csv(data_path)
df['Volume'] = df['Volume '+'BNB']
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_' + str(n))
df = df.join(MA)
return df
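# Illustrative sketch (not in the original notebook): each indicator helper
# joins a new column onto the dataframe, e.g. 'MA_10' for a 10-period average.
def _demo_moving_average():
    demo = pd.DataFrame({'Close': np.linspace(100.0, 120.0, 50)})
    demo = moving_average(demo, 10)
    print(demo[['Close', 'MA_10']].tail())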
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
df = df.join(EMA)
return df
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
df = df.join(M)
return df
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
df = df.join(ROC)
return df
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
df = df.join(ATR)
return df
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
df = df.join(B1)
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
df = df.join(B2)
return df
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP': PP, 'R1': R1, 'S1': S1, 'R2': R2, 'S2': S2, 'R3': R3, 'S3': S3}
PSR = pd.DataFrame(psr)
df = df.join(PSR)
return df
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
df = df.join(SOk)
return df
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d_' + str(n))
df = df.join(SOd)
return df
def trix(df, n):
"""Calculate TRIX for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
EX2 = EX1.ewm(span=n, min_periods=n).mean()
EX3 = EX2.ewm(span=n, min_periods=n).mean()
i = 0
ROC_l = [np.nan]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name='Trix_' + str(n))
df = df.join(Trix)
return df
def average_directional_movement_index(df, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param df: pandas.DataFrame
:param n:
:param n_ADX:
:return: pandas.DataFrame
"""
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
# -- coding: utf-8 --
import io
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import pandas as pd
def pie(predict_data):
plt.rcParams['font.sans-serif'] = ['Microsoft Yahei']
# specify the name of each pie slice
labels = '弱火', '正常', '过火'
colors = ['r', 'y', 'b']
# specify the value of each slice, which determines the percentages
if predict_data[0] > predict_data[1] and predict_data[0] > predict_data[2]:
explode = (0.1, 0, 0)
elif predict_data[1] > predict_data[0] and predict_data[1] > predict_data[2]:
explode = (0, 0.1, 0)
else:
explode = (0, 0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(predict_data, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90, colors=colors)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
canvas = fig1.canvas
# grab the plot data and save it via cv2
buffer = io.BytesIO() # create an in-memory byte stream
canvas.print_png(buffer) # render the canvas contents into the byte stream
data = buffer.getvalue() # get the raw bytes of the stream
# print("binary stream of the plot:\n", data)
buffer.write(data) # write the data back into the buffer
img = Image.open(buffer) # open the image data with PIL
img = np.asarray(img)
# cv2.imwrite("02.jpg", img)
buffer.close()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.clf() # avoid memory leaks: clear all axes of the current figure but keep the window so it can be reused for other plots
plt.close() # close the window (the current window if none is specified)
return img
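# Illustrative usage sketch (not in the original script): render a pie chart
# for a made-up prediction vector and save it with cv2.
#     img = pie([0.1, 0.7, 0.2])
#     cv2.imwrite("prediction_pie.jpg", img)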
def history_save(predict_data, save_file):
df = pd.DataFrame(predict_data, columns=['结果', '概率', '弱火', '正常', '过火'], dtype=float)
# -*- encoding: utf-8 -*-
import time
import json
import pandas as pd
class Hmm:
def __init__(self):
self.trans_p = {'S': {}, 'B': {}, 'M': {}, 'E': {}}
self.emit_p = {'S': {}, 'B': {}, 'M': {}, 'E': {}}
self.start_p = {'S': 0, 'B': 0, 'M': 0, 'E': 0}
self.state_num = {'S': 0, 'B': 0, 'M': 0, 'E': 0}
self.state_list = ['S', 'B', 'M', 'E']
self.line_num = 0
self.smooth = 1e-6
@staticmethod
def __state(word):
"""获取词语的BOS标签,标注采用 4-tag 标注方法,
tag = {S,B,M,E},S表示单字为词,B表示词的首字,M表示词的中间字,E表示词的结尾字
Args:
word (string): 函数返回词语 word 的状态标签
"""
if len(word) == 1:
state = ['S']
else:
state = list('B' + 'M' * (len(word) - 2) + 'E')
return state
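# Illustrative sketch (not part of the original module): the 4-tag scheme
# produced by __state for words of length 1, 2 and 3.
#     'A'   -> ['S']
#     'AB'  -> ['B', 'E']
#     'ABC' -> ['B', 'M', 'E']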
def train(self, filepath, save_model=False):
"""训练hmm, 学习发射概率、转移概率等参数
Args:
save_model: 是否保存模型参数
filepath (string): 训练预料的路径
"""
print("正在训练模型……")
start_time = time.thread_time()
with open(filepath, 'r', encoding='utf8') as f:
for line in f.readlines():
self.line_num += 1
line = line.strip().split()
# get the observation (character) sequence
char_seq = list(''.join(line))
# get the state (BMES) sequence
state_seq = []
for word in line:
state_seq.extend(self.__state(word))
# check that both sequences have equal length
assert len(char_seq) == len(state_seq)
# accumulate parameter counts
for i, s in enumerate(state_seq):
self.state_num[s] = self.state_num.get(s, 0) + 1.0
self.emit_p[s][char_seq[i]] = self.emit_p[s].get(
char_seq[i], 0) + 1.0
if i == 0:
self.start_p[s] += 1.0
else:
last_s = state_seq[i - 1]
self.trans_p[last_s][s] = self.trans_p[last_s].get(
s, 0) + 1.0
# normalize:
self.start_p = {
k: (v + 1.0) / (self.line_num + 4)
for k, v in self.start_p.items()
}
self.emit_p = {
k: {w: num / self.state_num[k]
for w, num in dic.items()}
for k, dic in self.emit_p.items()
}
self.trans_p = {
k1: {k2: num / self.state_num[k1]
for k2, num in dic.items()}
for k1, dic in self.trans_p.items()
}
end_time = time.thread_time()
print("训练完成,耗时 {:.3f}s".format(end_time - start_time))
# save the parameters
if save_model:
parameters = {
'start_p': self.start_p,
'trans_p': self.trans_p,
'emit_p': self.emit_p
}
jsonstr = json.dumps(parameters, ensure_ascii=False, indent=4)
param_filepath = "./data/HmmParam_Token.json"
with open(param_filepath, 'w', encoding='utf8') as jsonfile:
jsonfile.write(jsonstr)
def viterbi(self, text):
"""Viterbi 算法
Args:
text (string): 句子
Returns:
list: 最优标注序列
"""
text = list(text)
dp = pd.DataFrame(index=self.state_list)
import pandas as pd
from web3 import Web3
def get_cleaned_poap_data():
###__getting all info about POAP events__###
poap_events = pd.read_json("datasets/event_data.json")
# renaming event columns for merging with poap dataset
new_event_columns_names = {}
for col in poap_events.columns:
if "event" in col:
new_name = col
else:
new_name = f"event_{col}"
new_event_columns_names[col] = new_name
poap_events = poap_events.rename(columns=new_event_columns_names)
###__getting all info about POAP token__###
poap_xdai = pd.read_json("datasets/xdai_token_data.json")
poap_xdai["chain"] = "xdai"
poap_eth = pd.read_json("datasets/ethereum_token_data.json")
poap_eth["chain"] = "ethereum"
poap = pd.concat([poap_eth, poap_xdai])
poap_original_length = len(poap)
###__merging POAP events and tokens__###
poap = poap.merge(poap_events, on="event_id", how="left")
poap_new_length = len(poap)
if poap_original_length != poap_new_length:
raise ValueError(
"There was a problem with merging, new dataframe has different amount of lines"
)
# getting checkSummed addresses
poap["owner_id_checksum"] = poap["owner_id"].apply(
lambda adr: Web3.toChecksumAddress(adr)
)
return poap.loc[poap.owner_id != "0x0000000000000000000000000000000000000000"]
def get_daohaus_cleaned_data():
dao_members_dh = pd.read_json("datasets/dao_member_daohaus.json")
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations: verify the cash/share arithmetic of filled orders (fees, fixed fees, slippage and the various size types)
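# In the two buy cases below, 10% slippage lifts the fill price to 11 and the affordable
# size solves size * 11 * 1.1 + 1 = 100, i.e. 99 / 12.1 = 8.1818... shares, so both the
# size=10 and size=100 requests are capped by available cash and pay ~10 in total fees.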
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
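# In the two sell cases below, 10% slippage drops the fill price to 9: selling 10 shares
# brings in 90 with 90 * 0.1 + 1 = 10 in fees (cash 100 + 90 - 10 = 180), and selling 100
# shares brings in 900 with 91 in fees (cash 909); the position goes short accordingly.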
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging: with log=True, process_order_nb should fill log_record with the full order context and the resulting order
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
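# Shared fixtures for the signal tests: a 5-day entry/exit pattern (plus 3-column 'wide'
# variants tiled as columns 'a', 'b', 'c') and thin wrappers that call
# vbt.Portfolio.from_signals with direction='all' / 'longonly' / 'shortonly'.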
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
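# Shared fixtures for the order tests: an order-size series of [+inf, -inf, NaN, +inf, -inf]
# (plus a tiled wide variant and a unit-size variant) and wrappers around
# vbt.Portfolio.from_orders with direction='all' / 'longonly' / 'shortonly'.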
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
| pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05']) | pandas.DatetimeIndex |
import sys
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QPixmap, QImage, QPalette, QBrush
from PyQt5.QtWidgets import *
from PyQt5 import uic
import pandas as pd
form_class = uic.loadUiType('./ui/Title_.ui')[0]
cam = True
class Title(QWidget, form_class):
def __init__(self):
super().__init__()
self.setupUi(self)
self.setWindowTitle("Main")
# Background: scale the background image to fill the whole window
self.background_path = "./image/3.png"
self.oImage = QImage(self.background_path)
self.sImage = self.oImage.scaled(QSize(1920, 1080))
# Palette setup
self.palette = QPalette()
self.palette.setBrush(10, QBrush(self.sImage))
self.setPalette(self.palette)
# Register button click events ##
self.re_btn.clicked.connect(lambda state, button=self.re_btn: self.btn_clicked(button))
self.cal_btn.clicked.connect(lambda state, button=self.cal_btn: self.btn_clicked(button))
self.start_btn.clicked.connect(lambda state, button=self.start_btn: self.btn_clicked(button))
self.exec_btn.clicked.connect(self.exec)
# self.search_btn.clicked.connect(lambda state, button = self.search_btn : self.btn_clicked(state, button))
self.del_btn.clicked.connect(self.deleteFriend)
# Read the friend list (thread = id) from the csv file and keep it as a list
self.f = | pd.read_csv('./file/friend.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from scipy.stats import multivariate_normal as mvn
import seaborn as sn
import math
import gc
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler, normalize
import FourierClock
from scipy.stats import ks_2samp
from functools import reduce
import random
import os
from numpy.linalg import norm
import subprocess
from copulas.multivariate import GaussianMultivariate
val_errors1 = []
test_errors1 = []
N_GENES = 30
SEED = 0
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
os.environ['PYTHONHASHSEED'] = str(SEED)
df = pd.read_csv('Data\\X_train_raw.csv').T
df_valid = pd.read_csv('Data\\X_valid_raw.csv').T
df_test = pd.concat((pd.read_csv('Data\\X_test_raw_A.txt').T, pd.read_csv('Data\\X_test_raw_B.txt').T)).iloc[[0, 1, 2, 4, 5], :]
rach_clusters = pd.read_csv('Data\\X_train_clusters.csv')
Y_data = df.iloc[1:, -1].astype('float64')
Y_copy = Y_data
Y_valid_data = df_valid.iloc[1:, -1].astype('float64')
Y_valid_copy = Y_valid_data
common_IDs = reduce(np.intersect1d, (df.iloc[0, :-1].values, df_valid.iloc[0, :-1].values, df_test.iloc[0, :].values))
idx = np.where(df.iloc[0, :].isin(common_IDs))[0]
df = df.iloc[:, idx]
idx_valid = np.where(df_valid.iloc[0, :].isin(common_IDs))[0]
df_valid = df_valid.iloc[:, idx_valid]
idx_test = np.where(df_test.iloc[0, :].isin(common_IDs))[0]
df_test = df_test.iloc[:, idx_test]
X_data = df.iloc[1:, :].astype('float64')
X_ID = df.iloc[0, :]
X_valid_data = df_valid.iloc[1:, :].astype('float64')
X_valid_ID = df_valid.iloc[0, :]
X_test_data = df_test.iloc[1:, :].astype('float64')
X_test_ID = df_test.iloc[0, :]
X_ID1 = np.argsort(X_ID)
X_ID = X_ID.iloc[X_ID1]
X_data = X_data.iloc[:, X_ID1]
X_data.columns = X_ID
X_ID1 = np.argsort(X_valid_ID)
X_valid_ID = X_valid_ID.iloc[X_ID1]
X_valid_data = X_valid_data.iloc[:, X_ID1]
X_valid_data.columns = X_valid_ID
X_ID1 = np.argsort(X_test_ID)
X_test_ID = X_test_ID.iloc[X_ID1]
X_test_data = X_test_data.iloc[:, X_ID1]
X_test_data.columns = X_test_ID
# Variance threshold
from sklearn.feature_selection import VarianceThreshold
selector = VarianceThreshold()
selector.fit(X_data)
var_idx = selector.variances_ > 5
X_data = X_data.iloc[:, var_idx]
X_ID = X_ID.iloc[var_idx]
X_valid_data = X_valid_data.iloc[:, var_idx]
X_valid_ID = X_valid_ID.iloc[var_idx]
X_test_data = X_test_data.iloc[:, var_idx]
X_test_ID = X_test_ID.iloc[var_idx]
X_data.reset_index(inplace=True, drop=True)
X_valid_data.reset_index(inplace=True, drop=True)
X_test_data.reset_index(inplace=True, drop=True)
X_ID.reset_index(inplace=True, drop=True)
X_valid_ID.reset_index(inplace=True, drop=True)
X_test_ID.reset_index(inplace=True, drop=True)
del df
gc.collect()
n_folds = Y_data.shape[0]
folds = KFold(n_splits=n_folds, shuffle=True, random_state=SEED)
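# Encode the sampling time (hours) cyclically: each time t is mapped to the unit-circle
# point (-cos(2*pi*t/24 + pi/2), sin(2*pi*t/24 + pi/2)), so 0 h and 24 h coincide and the
# network can predict a 2-D target instead of a discontinuous clock value.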
y_cos = -np.cos((2 * np.pi * Y_data.astype('float64') / 24)+(np.pi/2))
y_sin = np.sin((2 * np.pi * Y_data.astype('float64') / 24)+(np.pi/2))
Y_valid_cos = -np.cos((2 * np.pi * Y_valid_data.astype('float64') / 24)+(np.pi/2))
Y_valid_sin = np.sin((2 * np.pi * Y_valid_data.astype('float64') / 24)+(np.pi/2))
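# cyclical_loss accumulates the angle (arccos of the cosine similarity) between predicted
# and true (cos, sin) pairs; custom_loss is its differentiable TensorFlow counterpart
# (mean squared angle) used as the training objective.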
def cyclical_loss(y_true, y_pred):
error = 0
for i in range(y_pred.shape[0]):
error += np.arccos((y_true[i, :] @ y_pred[i, :]) / (norm(y_true[i, :]) * norm(y_pred[i, :])))
return error
def custom_loss(y_true, y_pred):
return tf.reduce_mean((tf.math.acos(tf.matmul(y_true, tf.transpose(y_pred)) / ((tf.norm(y_true) * tf.norm(y_pred)) + tf.keras.backend.epsilon()))**2))
adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, amsgrad=False)
def larger_model():
# create model
model = Sequential()
model.add(Dense(256, kernel_initializer='normal', activation='relu'))
model.add(Dense(1024, kernel_initializer='normal', activation='relu'))
model.add(Dense(2, kernel_initializer='normal'))
# Compile model
model.compile(loss=custom_loss, optimizer=adam)
return model
Y_data = np.concatenate((y_cos.values.reshape(-1, 1), y_sin.values.reshape(-1, 1)), axis=1)
Y_valid_data = np.concatenate((Y_valid_cos.values.reshape(-1, 1), Y_valid_sin.values.reshape(-1, 1)), axis=1)
error = 0 # Initialise error
all_preds = np.zeros((Y_data.shape[0], 2)) # Create empty array
all_valid_preds = np.zeros((Y_valid_data.shape[0], 2)) # Create empty array
early_stop = EarlyStopping(patience=100, restore_best_weights=True, monitor='val_loss', mode='min')
X_data_times = X_data.T
Y_times = np.array([0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44])
scaler = StandardScaler()
X_data_times_idx = X_data_times.index
X_data_times = (scaler.fit_transform(X_data_times.T)).T
X_data_times = pd.DataFrame(data=X_data_times, index=X_data_times_idx)
X_data_times = pd.concat((pd.DataFrame(Y_times.reshape(1, 12)), X_data_times), axis=0)
X_data_times.to_csv('Data\\X_train_times.csv', header=None)
subprocess.call(['C:\\Program Files\\R\\R-4.0.3\\bin\\Rscript', 'metacycle_scores.R'], shell=False)
arser_scores = pd.read_csv('MetaScores\\ARSresult_X_train_times.csv')
jtk_scores = pd.read_csv('MetaScores\\JTKresult_X_train_times.csv')
auto_indices, auto_clock_genes, auto_scores = FourierClock.get_autocorrelated_genes(X_data, X_ID)
auto_scores = np.abs(np.array(auto_scores))
cross_indices, cross_clock_genes, cross_scores = FourierClock.cross_corr(X_data, Y_copy, X_ID)
cross_scores = np.abs(np.array(cross_scores))
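# Assemble four rhythmicity scores per gene: autocorrelation strength, cross-correlation
# with sampling time, and the ARSER / JTK_CYCLE BH-adjusted p-values; the p-value columns
# are flipped to 1 - p so that larger means "more rhythmic" for every column.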
scores = np.concatenate((auto_scores.reshape(-1, 1), cross_scores.reshape(-1, 1),
arser_scores['fdr_BH'].values.reshape(-1, 1), jtk_scores['ADJ.P'].values.reshape(-1, 1)),
axis=1)
scores[:, 2:] = 1-scores[:, 2:]
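# Gaussian-copula scoring: fit a copula to the joint score distribution, draw synthetic
# samples to estimate a mean/covariance, then rank every gene by the multivariate-normal
# CDF of its score vector (a joint "how extreme" measure); the top 3 * N_GENES genes are kept.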
num_resamples = 1000 # Change to 50,000/100,000
gcopula = GaussianMultivariate()
gcopula.fit(scores)
random_sample = gcopula.sample(num_resamples)
sample_scores = pd.DataFrame(random_sample)
mean = np.mean(sample_scores.values, axis=0)
covariance = np.cov(sample_scores.T)
dist = mvn(mean=mean, cov=covariance, allow_singular=True)
gene_scores = []
for i in range(scores.shape[0]):
gene_scores.append(dist.cdf(x=scores[i, :]))
gene_scores = np.array(gene_scores)
gene_scores = np.concatenate((arser_scores['CycID'].values.reshape(-1, 1), gene_scores.reshape(-1, 1)), axis=1)
gene_scores = gene_scores[gene_scores[:, 1].argsort()[::-1]]
selected_genes = gene_scores[:N_GENES*3, 0]
idx = np.where(X_ID.isin(selected_genes))[0]
selected_scores = gene_scores[idx]
X_data = X_data.iloc[:, idx]
idx_valid = np.where(X_valid_ID.isin(selected_genes))[0]
X_valid_data = X_valid_data.iloc[:, idx_valid]
idx_test = np.where(X_test_ID.isin(selected_genes))[0]
X_test_data = X_test_data.iloc[:, idx_test]
X_ID = X_ID.iloc[idx]
X_valid_ID = X_valid_ID.iloc[idx_valid]
X_test_ID = X_test_ID.iloc[idx_test]
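# Kolmogorov-Smirnov filter: compare each candidate gene's expression distribution between
# the training and validation sets and keep the 2 * N_GENES genes with the largest p-values
# (i.e. the most similar distributions across the two sets).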
scores = []
pvalues = []
for i in range(X_data.shape[1]):
l = ks_2samp(X_data.iloc[:, i], X_valid_data.iloc[:, i])
scores.append(i)
pvalues.append(l.pvalue)
pvalues_idx = np.argsort(pvalues)
scores = pvalues_idx[(pvalues_idx.shape[0]-2*N_GENES):]
similar_genes = selected_genes[scores]
X_data = X_data.iloc[:, scores]
selected_scores = selected_scores[scores]
X_ID = X_ID.iloc[scores]
X_valid_data = X_valid_data.iloc[:, scores]
X_test_data = X_test_data.iloc[:, scores]
Y_copy_res = np.array([0, 4, 8, 12, 16, 20, 0, 4, 8, 12, 16, 20])
X_ID2 = X_data.columns.values
scaler = MinMaxScaler()
scaler.fit(X_data)
X_data = scaler.transform(X_data)
X_valid_data = scaler.transform(X_valid_data)
X_test_data = scaler.transform(X_test_data)
X_data = pd.DataFrame(data=X_data, columns=X_ID2)
X_valid_data = | pd.DataFrame(data=X_valid_data, columns=X_ID2) | pandas.DataFrame |
from datetime import datetime as dt
import os
import pandas as pd
import ntpath
import numpy as np
import math
from distutils.dir_util import copy_tree
from shutil import rmtree
import sqlite3
# 'cleanData' takes the raw data downloaded from 'http://football-data.co.uk/'
# and cleans it so that only the columns needed for testing are kept.
# This function is used to make a directory.
def make_directory(path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
# If a directory already exists it will be removed.
def rmv_dir(path):
if os.path.exists(path):
rmtree(path)
# This function is used to copy a file/folder.
def copy_csv(from_path, to_path):
make_directory(to_path)
if os.path.isfile(from_path):
with open(to_path, 'w') as to_file, open(from_path, 'r') as from_file:
for line in from_file:
to_file.write(line)
elif os.path.isdir(from_path):
copy_tree(from_path, to_path)
else:
raise ValueError("Copy_CSV Error. File either does not exist, or is an unsupported file type")
# clean the original raw_data data by storing only the columns that we need, and removing the rest.
def clean(from_path, to_path, columns):
def convert_date(date):
if date == '':
return None
else:
_, file = ntpath.split(to_path)
if len(date.split('-')) == 3:
return date
else:
return dt.strptime(date, '%d/%m/%y').date()
# convert_score checks whether the score is 'Not a Number' (NaN) and leaves it unchanged if so;
# otherwise (the usual case) it casts the value to int.
def convert_score(score):
if math.isnan(score):
return score
else:
return int(score)
df = pd.read_csv(from_path, error_bad_lines=False)
df = df[columns]
df = df[pd.notnull(df['Date'])]
df['FTHG'] = df['FTHG'].apply(convert_score)
df['FTAG'] = df['FTAG'].apply(convert_score)
df['Date'] = df['Date'].apply(convert_date)
head, _ = ntpath.split(to_path)
if not os.path.exists(head):
os.makedirs(head)
df.to_csv(to_path, index=False)
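# Example usage (hypothetical paths; the column subset mirrors the fields used elsewhere in this module):
# clean('raw_data/2017-2018.csv', 'cleaned/2017-2018.csv',
#       ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR'])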
# This function is cleaning the data in the raw_data folder from every year.
def clean_everything(from_folder, to_folder, columns, from_year, to_year):
for year in range(from_year, to_year + 1):
csv = '{}-{}.csv'.format(year, year + 1)
frompath = os.path.join(from_folder, csv)
topath = os.path.join(to_folder, csv)
print("Cleaning data", frompath, "...")
clean(frompath, topath, columns)
# The years are then concatenated through this function.
def combine_games(cleaned_folder_path, final_path, start_year, end_year, make_file=True):
print("Combining matches played from {} to {}...".format(start_year, end_year))
dfList = []
for year in range(start_year, end_year + 1):
file = '{}-{}.csv'.format(year, year + 1)
path = os.path.join(cleaned_folder_path, file)
df = pd.read_csv(path)
dfList.append(df)
df = pd.concat(dfList, ignore_index=True, sort=False)
if make_file:
df.to_csv(final_path, index=False)
return df
def get_match_results_against(file_path, cleaned_folder_path, final_path, from_year, to_year):
print("Getting head-to-head results...")
team_detail, match_detail = {}, {}
match_detail_columns = [
'HT_win_rate_against',
'AT_win_rate_against'
]
for item in match_detail_columns:
match_detail[item] = []
# Get head-to-head result from fromYear to toYear
df = combine_games(cleaned_folder_path, final_path, from_year, to_year, make_file=False)
for index, row in df.iterrows():
home_team = row['HomeTeam']
away_team = row['AwayTeam']
if home_team not in team_detail:
team_detail[home_team] = {}
if away_team not in team_detail:
team_detail[away_team] = {}
if away_team not in team_detail[home_team]:
team_detail[home_team][away_team] = {
'match_played': 0,
'win': 0
}
if home_team not in team_detail[away_team]:
team_detail[away_team][home_team] = {
'match_played': 0,
'win': 0
}
TD_HT_AT = team_detail[home_team][away_team]
TD_AT_HT = team_detail[away_team][home_team]
home_team_win_rate = TD_HT_AT['win'] / TD_HT_AT['match_played'] if TD_HT_AT['match_played'] > 0 else np.nan
away_team_win_rate = TD_AT_HT['win'] / TD_AT_HT['match_played'] if TD_AT_HT['match_played'] > 0 else np.nan
match_detail['HT_win_rate_against'].append(home_team_win_rate)
match_detail['AT_win_rate_against'].append(away_team_win_rate)
TD_HT_AT['match_played'] += 1
TD_AT_HT['match_played'] += 1
game_result = row['FTR']
if game_result == 'H':
TD_HT_AT['win'] += 1
elif game_result == 'A':
TD_AT_HT['win'] += 1
# Only take the last x results of df and combine with filedf.
# This is because we don't always want to merge all data from 1993 to 2018
filed_f = pd.read_csv(file_path)
row_count = filed_f.shape[0]
filed_f['HT_win_rate_against'] = pd.Series(match_detail['HT_win_rate_against'][-row_count:], index=filed_f.index)
filed_f['AT_win_rate_against'] = pd.Series(match_detail['AT_win_rate_against'][-row_count:], index=filed_f.index)
filed_f.to_csv(file_path, index=False)
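# At this point the combined file carries two extra columns, HT_win_rate_against and
# AT_win_rate_against, holding each side's historical win rate in previous meetings of the
# same fixture (NaN when the two teams have not met before).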
def remove_goal_scores(final_path):
print("Removing Goal Scores...")
df = | pd.read_csv(final_path) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from scipy.spatial import distance
from seaborn import clustermap
Air = ["taxonomic_profile_4.txt" ,"taxonomic_profile_7.txt" ,"taxonomic_profile_8.txt" ,"taxonomic_profile_9.txt" ,"taxonomic_profile_10.txt" ,"taxonomic_profile_11.txt" ,"taxonomic_profile_12.txt" ,"taxonomic_profile_23.txt" ,"taxonomic_profile_26.txt" ,"taxonomic_profile_27.txt"]
Skin = ["taxonomic_profile_1.txt" ,"taxonomic_profile_13.txt" ,"taxonomic_profile_14.txt" ,"taxonomic_profile_15.txt" ,"taxonomic_profile_16.txt" ,"taxonomic_profile_17.txt" ,"taxonomic_profile_18.txt" ,"taxonomic_profile_19.txt" ,"taxonomic_profile_20.txt" ,"taxonomic_profile_28.txt"]
Oral = ["taxonomic_profile_6.txt" ,"taxonomic_profile_7.txt" ,"taxonomic_profile_8.txt" ,"taxonomic_profile_13.txt" ,"taxonomic_profile_14.txt" ,"taxonomic_profile_15.txt" ,"taxonomic_profile_16.txt" ,"taxonomic_profile_17.txt" ,"taxonomic_profile_18.txt" ,"taxonomic_profile_19.txt"]
Gas = ["taxonomic_profile_0.txt" ,"taxonomic_profile_1.txt" ,"taxonomic_profile_2.txt" ,"taxonomic_profile_3.txt" ,"taxonomic_profile_4.txt" ,"taxonomic_profile_5.txt" ,"taxonomic_profile_9.txt" ,"taxonomic_profile_10.txt" ,"taxonomic_profile_11.txt" ,"taxonomic_profile_12.txt"]
Uro = ["taxonomic_profile_0.txt" ,"taxonomic_profile_2.txt" ,"taxonomic_profile_3.txt" ,"taxonomic_profile_5.txt" ,"taxonomic_profile_6.txt" ,"taxonomic_profile_21.txt" ,"taxonomic_profile_22.txt" ,"taxonomic_profile_24.txt" ,"taxonomic_profile_25.txt"]
os.chdir("./Air/short_read")
C = pd.DataFrame()
for i in range(len(Air)):
tmp = pd.read_csv(Air[i], header=3, delimiter="\t")
C = C.append(tmp[tmp['RANK']=='species']['PERCENTAGE'], ignore_index=True)
os.chdir("./Skin/short_read")
U1 = pd.DataFrame()
for i in range(len(Skin)):
tmp = pd.read_csv(Skin[i], header=3, delimiter="\t")
U1 = U1.append(tmp[tmp['RANK']=='species']['PERCENTAGE'], ignore_index=True)
C = C.append(U1, ignore_index=True)
os.chdir("./Oral/short_read")
U1 = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
def compare_df(df_list,column,name_ext):
"""Creates DataFrame from same column of multiple DataFrames (df_list) and
resample it linear in time.
It needs:
df_list ... 1D list with pandas.DataFrames which have the common column(s)
<column(s)>
column ... column or list of columns to be copied to new DataFrame
name_ext ... (string) list with extensions for each entry in df_list for
new column(s) name (<column(s)>_<name_ext>), must be of same size as
df_list
It returns:
df_compare ... pandas.DataFrame which holds all extracted columns sampled
at the same times and ready for comparison
"""
# First of all compare length of df_list with length of name_ext
if len(df_list) != len(name_ext):
print('Length of list with pandas.DataFrames does not match length of list with extensions!')
print('Empty DataFrame will be returned!')
df_compare = pd.DataFrame()
else:
# create 1D list of wished column(s)
compare = []
for i in range(len(df_list)):
if type(column) == str:
if column in df_list[i].columns:
compare.append(df_list[i][column].to_frame())
compare[i].columns = [column+name_ext[i]]
else:
# if column does not exist for df_list[i], append empty
# DataFrame
compare.append(pd.DataFrame())
# end if column in df_list[i].columns:
elif type(column) == list:
exist_col = []
name_col = []
for col in column:
if col in df_list[i].columns:
exist_col.append(col)
name_col.append(col+name_ext[i])
# end if col in df_list[i].columns:
# for col in column:
if len(exist_col)>1:
compare.append(df_list[i][exist_col])
else:
compare.append(df_list[i][exist_col].to_frame())
# end if len(exist_col)>1:
compare[i].columns = name_col
else:
compare.append(pd.DataFrame())
# end if type(column) == str:
# end for i in range(len(df_list)):
df_compare = pd.concat(compare,axis=1)
df_compare = df_compare.interpolate(method='time')
# limit each column to time window in original DataFrame
for i in range(len(df_list)):
if type(column) == str:
if column in df_list[i].columns:
df_compare.loc[df_list[i].apply(pd.Series.last_valid_index)[column]+ | pd.tseries.offsets.DateOffset(seconds=1) | pandas.tseries.offsets.DateOffset |
"""
Common functions used in flux calculation
(c) 2016-2017 <NAME> <<EMAIL>>
"""
from collections import namedtuple
import warnings
import numpy as np
from scipy import optimize
import scipy.constants.constants as sci_const
import pandas as pd
# Physical constants
# Do not modify unless you are in a different universe.
# - 'p_std': standard atmospheric pressure [Pa]
# - 'R_gas': the universal gas constant [J mol^-1 K^-1]
# - 'T_0': zero Celsius in Kelvin
# - 'air_conc_std': air concentration at STP condition [mol m^-3]
phys_const = {
'T_0': sci_const.zero_Celsius,
'p_std': sci_const.atm,
'R_gas': sci_const.R,
'air_conc_std': sci_const.atm / (sci_const.R * sci_const.zero_Celsius)}
T_0 = phys_const['T_0']
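# Quick sanity check (illustrative only): the standard air concentration above
# follows from the ideal gas law, n / V = p / (R * T).
def _check_air_conc_std():
    n_over_v = phys_const['p_std'] / (phys_const['R_gas'] * phys_const['T_0'])
    # ~44.6 mol m^-3, identical to the 'air_conc_std' entry defined above
    assert abs(n_over_v - phys_const['air_conc_std']) < 1e-9
    return n_over_v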
def chamber_lookup_table_func(doy, chamber_config):
"""
(float) -> namedtuple
Return a chamber meta information look-up table.
"""
# define returned data template
ChamberLookupTableResult = namedtuple(
'ChamberLookupTableResult',
['schedule_start', 'schedule_end', 'n_ch', 'smpl_cycle_len',
'n_cycle_per_day', 'unit_of_time', 'df'])
for sch_id in chamber_config:
if type(chamber_config[sch_id]['schedule_start']) is str:
sch_start = pd.Timestamp(chamber_config[sch_id]['schedule_start'])
sch_end = | pd.Timestamp(chamber_config[sch_id]['schedule_end']) | pandas.Timestamp |
from discord.ext import commands
import discord
import math
import pandas as pd
from numpy import nan
import datetime as dt
import plotly.graph_objects as go
import tweepy
from wol_bot_static import token, teams, ha, pred_cols, twitter_apikey, twitter_secret_apikey, \
twitter_access_token, twitter_secret_access_token, poll_channel, help_brief, help_desc
import asyncio
# token - Discord bot token
# teams - dictionary for converting team code to full team name
# ha - dictionary for converting h to home and a to away
# pred_cols - column names for the prediction data
# prediction league functions
def game_result(wolves, opp):
if(wolves > opp):
return "wolves"
elif(opp > wolves):
return "opp"
else:
return "draw"
def make_ordinal(n):
'''
Convert an integer into its ordinal representation::
make_ordinal(0) => '0th'
make_ordinal(3) => '3rd'
make_ordinal(122) => '122nd'
make_ordinal(213) => '213th'
'''
n = int(n)
suffix = ['th', 'st', 'nd', 'rd', 'th'][min(n % 10, 4)]
if 11 <= (n % 100) <= 13:
suffix = 'th'
return str(n) + suffix
def refresh_scores():
pred = pd.read_csv('data_wol/predictions.csv')
results = pd.read_csv('data_wol/results.csv')
#get games that need their point values to be updated
update = pred[pred['pts'].isnull()]
for index, row in update.iterrows():
game = results[(results['game'] == row['game']) & (results['opp_score'].notnull())]
#if game has been played and has a result, add the points, otherwise pass
if game.shape[0] > 0:
total_pts = 0
ind = min(list(game.index))
if (game['opp_score'][ind] == row['opp_score']):
total_pts += 1
if (game['wolves'][ind] == row['wolves']):
total_pts += 1
if (game_result(game['wolves'][ind], game['opp_score'][ind]) == game_result(row['wolves'], row['opp_score'])):
total_pts += 2
pred.at[index, 'pts'] = int(total_pts)
pred.to_csv('data_wol/predictions.csv', index=False)
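# Worked example (sketch) of the scoring rule applied in refresh_scores():
# +1 for the exact opposition score, +1 for the exact Wolves score and
# +2 for the correct result, so a perfect prediction is worth 4 points.
def _example_points(pred_wolves=2, pred_opp=1, real_wolves=2, real_opp=0):
    pts = 0
    if pred_opp == real_opp:
        pts += 1
    if pred_wolves == real_wolves:
        pts += 1
    if game_result(pred_wolves, pred_opp) == game_result(real_wolves, real_opp):
        pts += 2
    return pts  # here: 0 + 1 + 2 = 3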
# poll functions
def get_poll_info(polls, code):
return polls[polls["code"] == code.lower()]
def get_user_responses(responses, code, author):
return responses[(responses["user"] == str(author)) & (responses["code"] == code)]
def get_poll_results(responses, polls, code):
poll = get_poll_info(polls, code)
poll_responses = responses[responses["code"] == code]["response"].value_counts(normalize=True)
msg = "Results:\n**{}**\n```".format(poll["poll"].iloc[0])
other = 0
for i in range(len(poll_responses)):
if i <= 4:
msg += "{:18.15} {:.1f}%\n".format(poll_responses.index[i], poll_responses[i] * 100)
else:
other += poll_responses[i]
if other > 0:
msg += "{:18.15} {:.1f}%```".format("other", other * 100)
else:
msg += "```"
return msg
def add_polls_row(polls, code, poll, limit):
nr = pd.DataFrame({"code": code, "poll": poll, "vote_limit": limit, "open": 1}, index=[0])
pd.concat([nr, polls]).reset_index(drop=True).to_csv('data_wol/polls.csv', index=False)
def add_responses_row(responses, code, response, author):
nr = pd.DataFrame({"code": code, "response": response, "user": author}, index=[0])
pd.concat([nr, responses]).reset_index(drop=True).to_csv("data_wol/poll_responses.csv", index=False)
def poll_code_exists(polls, code):
return code in polls['code'].unique()
#refresh scores on startup
refresh_scores()
bot = commands.Bot(command_prefix='$')
@bot.event
async def on_ready():
print('We have logged in as {0.user}'.format(bot))
@bot.command(brief=help_brief["ping"], description=help_desc["ping"])
async def ping(ctx):
latency = bot.latency
await ctx.send(latency)
# prediction table commands
@bot.command(brief=help_brief["score"], description=help_desc["score"])
async def score(ctx, game, score):
results_score = pd.read_csv('data_wol/results.csv')
pred_score = pd.read_csv('data_wol/predictions.csv')
#get data
author = str(ctx.message.author)
score_parts = score.split('-') #command will have wolves score first and opp score second
#check if game exists
exists_v = results_score['game'].str.contains(game)
exists = (exists_v.sum()) > 0
if exists:
date = results_score[results_score['game'] == game]['date']
time = results_score[results_score['game'] == game]['time']
fixture = dt.datetime.strptime('{} {}'.format(date[min(date.index)], time[min(time.index)]), '%m/%d/%Y %H:%M')
#if (fixture < dt.datetime.now()):
# message = "It's too late to predict {} {}.".format(teams[game[0:2]], ha[game[2]])
#else:
nrow = [author, game, score_parts[1], score_parts[0], nan] # data has opp first, wolves second
#overwrite if exists
overwrite_check = pred_score[(pred_score['user'] == author) & (pred_score['game'] == game)]
if overwrite_check.shape[0] == 1:
pred_score.loc[list(overwrite_check.index)[0]] = nrow
else:
pred_score = pred_score.append(pd.DataFrame([nrow], columns=pred_cols), ignore_index=True)
pred_score.to_csv('data_wol/predictions.csv', index=False)
message = "Score recorded! You predicted Wolves {}, {} {}.".format(score_parts[0], teams[game[:-1]], score_parts[1])
else:
nexts = results_score[results_score['wolves'].isnull()]['game']
next = nexts[min(nexts.index)]
message = "Please enter prediction for the next match {} {} with game code '{}'.".format(teams[next[0:2]], ha[next[2]], next)
await ctx.send(message)
@bot.command(brief=help_brief["format"], description=help_desc["format"])
async def format(ctx):
results_format = pd.read_csv('data_wol/results.csv')
nexts = results_format[results_format['wolves'].isnull()]['game']
next = nexts[min(nexts.index)]
message = "Command should be formatted as '$score GAMECODE WOLSCORE-OPPSCORE'. Example, '$score mch 2-1' where 'mch' " \
"translates to 'Manchester City Home'. Next match is {} {} with a game code of '{}'.".format(teams[next[0:2]], ha[next[2]], next)
await ctx.message.author.send(message)
@bot.command(hidden=True)
async def short_lb(ctx):
pred_lb = pd.read_csv('data_wol/predictions.csv')
lb = pred_lb.groupby(['user']).sum().sort_values(by=['pts'], ascending=False).reset_index()
top_5 = lb.nlargest(5, 'pts')
user = lb[lb['user'] == str(ctx.author)]
message = '``` Rank | User | Pts \n'
for index, row in top_5.iterrows():
message += '{:^8}|{:^18}|{:^7}\n'.format(make_ordinal(index + 1), row['user'].split('#')[0], int(row['pts']))
message += '...\n{:^8}|{:^18}|{:^7}```'.format(make_ordinal(list(user.index)[0] + 1), list(user['user'])[0].split('#')[0],
int(list(user['pts'])[0]))
await ctx.send(message)
@bot.command(hidden=True)
async def leaderboard(ctx):
full_pred_lb = pd.read_csv('data_wol/predictions.csv')
full_lb = full_pred_lb.groupby(['user']).sum().sort_values(by=['pts'], ascending=False).reset_index()
user_list = [x.split('#')[0] for x in list(full_lb['user'])]
files = []
for i in range(math.ceil(len(user_list) / 20)):
if i * 20 < len(user_list):
temp_full_lb = full_lb[(i * 20):((i + 1) * 20)]
temp_user_list = user_list[(i * 20):((i + 1) * 20)]
else:
temp_full_lb = full_lb[(i * 20):len(full_lb)]
temp_user_list = user_list[(i * 20):len(user_list)]
layout = go.Layout(autosize=True, margin = {'l': 0, 'r': 0, 't': 0, 'b': 0} )
fig = go.Figure(layout=layout, data=[go.Table(columnwidth=[10, 15, 10],
header=dict(values=['Rank', 'User', 'Points'], font=dict(color='black', size=12),
height=(500 / (temp_full_lb.shape[0] + 1))),
cells=dict(
values=[list(range((i * 20) + 1, ((i + 1) * 20) + 1)), temp_user_list, list(temp_full_lb['pts'])],
font=dict(color='black', size=11), height= (500 / (temp_full_lb.shape[0] + 1))))
])
fig.update_layout(width=350, height=700) #(25 * (full_lb.shape[0] + 2))
fig.write_image("data_wol/table{}.png".format(i))
files.append("data_wol/table{}.png".format(i))
await asyncio.wait([ctx.send(file=discord.File(f)) for f in files])
@bot.command(hidden=True)
async def refresh(ctx):
refresh_scores()
await ctx.send('Scores have been updated.')
# tweet commands
@bot.command(hidden=True)
async def tweet(ctx, *, tweet):
auth = tweepy.OAuthHandler(twitter_apikey, twitter_secret_apikey)
auth.set_access_token(twitter_access_token, twitter_secret_access_token)
api = tweepy.API(auth)
api.update_status(tweet)
await ctx.send('You tried to Tweet: {}'.format(tweet))
@bot.command(brief=help_brief["tweethelp"], description=help_desc["tweethelp"])
async def tweethelp(ctx):
await ctx.send("<NAME> can tweet! Any message with three 💬 reactions will be tweeted to the Discord Twitter account "
"(as long as mods permit). Messages with the 🔹 reaction have been sent. Check out the server Twitter page "
"at https://twitter.com/WwfcDiscord")
@bot.event
async def on_reaction_add(reaction, user):
print(reaction.message.content)
print(reaction.emoji)
channel_id = 346329500637855745
#channel_id = 557526209043628032
if reaction.message.channel.id != channel_id:
return
if reaction.emoji == "💬":
reaction_ct = reaction.message.reactions
tweet_go = 0
already_tweeted = 0
mod_denied = 0
print(reaction_ct)
for re in reaction_ct:
if re.emoji == "📵" and re.count > 0:
mod_denied = 1
if re.emoji == "🔹" and re.count > 0 and re.me:
already_tweeted = 1
if re.emoji == "💬" and re.count >= 3:
tweet_go = 1
print("tweet is a go")
if already_tweeted > 0:
print("already tweeted")
return
elif mod_denied > 0:
print("the mod denied your tweet")
return
elif tweet_go > 0:
auth = tweepy.OAuthHandler(twitter_apikey, twitter_secret_apikey)
auth.set_access_token(twitter_access_token, twitter_secret_access_token)
api = tweepy.API(auth)
api.update_status(reaction.message.content)
print("sent tweet")
await reaction.message.add_reaction("🔹")
return
#await reaction.message.channel.send(reaction.emoji)
# poll commands
@bot.command(hidden=True)
async def addpoll(ctx, code, limit, *poll_args):
poll = ' '.join(poll_args)
code = code.lower()
polls = pd.read_csv('data_wol/polls.csv')
if poll_code_exists(polls, code):
msg = "Code '{}' already exists. Try a new code.".format(code)
else:
add_polls_row(polls, code, poll, limit)
await bot.get_channel(poll_channel).send("New poll added:\n**{}**\n"
"Code: **{}**\nResponse limit: **{}**\n"
"Respond in #poll-spam with command '$vote {} *RESPONSE*'".format(poll, code, limit, code))
msg = "Poll added with code {}. Response limited to {} per user.".format(code, limit)
await ctx.send(msg)
@bot.command(hidden=True)
async def closepoll(ctx, code, delete):
code = code.lower()
delete = delete.lower()
polls = | pd.read_csv('data_wol/polls.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = | tm.get_data_path() | pandas.util.testing.get_data_path |
from project import logger
from flask_mongoengine import ValidationError
from mongoengine import MultipleObjectsReturned, DoesNotExist
import pandas as pd
def get_user(id_, username=None):
from project.auth.models import User
user_obj = None
try:
if username:
user_obj = User.objects.get(username=username)
elif id_:
user_obj = User.objects.get(id=id_)
except MultipleObjectsReturned:
        user_obj = User.objects(username=username)[0]
except DoesNotExist:
logger.warning("user or id does not exist in db")
return user_obj
def account_list_to_df(accts):
category = list()
type_ = list()
dates = list()
values = dict()
for acct in accts:
category.append(acct.name)
type_.append(acct.acct_type)
for entry in acct.history:
if entry.entry_date in values:
values[entry.entry_date].append(entry.value)
else:
dates.append(entry.entry_date.strftime('%b %y'))
values[entry.entry_date] = [entry.value]
acct_df = pd.DataFrame(values, index=category)
acct_df['Type'] = | pd.Series(type_) | pandas.Series |
#Ref: <NAME>
"""
Code tested on Tensorflow: 2.2.0
Keras: 2.4.3
dataset: https://finance.yahoo.com/quote/GE/history/
Also try S&P: https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC
"""
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
import seaborn as sns
#from datetime import datetime
df = pd.read_csv('data/GE.csv')
#Separate dates for future plotting
train_dates = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import scipy.stats as scs
from scipy.spatial import distance
import keras
from keras.models import Sequential, Model, Input
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg16
import tensorflow as tf
import requests
import json
from IPython.display import display, Image
import urllib.request
from PIL.ExifTags import TAGS
import PIL.Image
import time
from geopy.geocoders import Nominatim
def extract_image_url(pd_series):
'''
Extracts image URLs from the pictures column in the RescueGroups database.
    INPUT: Pandas Series where each item is a list of picture dictionaries (each holding a 'largeUrl' key)
OUTPUT: Pandas dataframe with animalID and imageURL
'''
large_image_urls = []
animalIDs = []
for lst in pd_series:
for dct in lst:
large_image_urls.append(dct['largeUrl'])
for url in large_image_urls:
animalIDs.append(url.split('/')[-2])
return pd.DataFrame({'animalID': animalIDs,'ImageUrl': large_image_urls})
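# Minimal usage sketch for extract_image_url(); the URL layout (animalID as the
# second-to-last path segment) mirrors the parsing above and the host name is
# purely illustrative.
def _example_extract_image_url():
    pics = pd.Series([
        [{'largeUrl': 'https://example.com/pictures/12345/pic1.jpg'}],
        [{'largeUrl': 'https://example.com/pictures/67890/pic2.jpg'}],
    ])
    df = extract_image_url(pics)
    # df.animalID -> ['12345', '67890']
    return df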
def extract_df(filepath):
'''
    Extracts orgId, animalID, name, breed and animalLocation from RescueGroups JSON and adds imageURLs
    INPUT: JSON filepath, string
    OUTPUT: two Pandas DataFrames (merged info frame, image URL frame)
'''
df = pd.read_json(filepath, lines=True)
images = extract_image_url(df.pictures)
df1 = df[['orgID','animalID','name','animalLocation']]
    # NOTE: You lose images with this concat
result = pd.concat([df1, images.ImageUrl], axis=1, join_axes=[df1.index])
# Return combined dataframe and original image source dataframe
return result, images
def download_images(urls, length=25):
'''
Downloads all images from Rescue Groups S3 bucket
INPUT: Pandas Series of URLs
OUTPUT: Images stored in data directory.
'''
for image_url in list(urls)[0:length]:
image_name = image_url.split('/')[-1]
r = requests.get(image_url, allow_redirects = True)
open('data/images/'+image_name, 'wb').write(r.content)
def load_RG_data():
'''
Load data from RescueGroup JSONs into Pandas dataframes and merge to single dataframe
INPUT: None
OUTPUT: Returns 2 dataframes, one of image URLs and other of other info
'''
df0, image0 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_newpets_1.json')
df1, image1 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_pets_1.json')
df2, image2 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_pets_2.json')
df3, image3 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_pets_3.json')
df4, image4 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_pets_4.json')
df5, image5 = extract_df('/Users/bil2ab/galvanize/RG_JSON/h9DH7711_pets_5.json')
combined_df = df0.append([df1, df2, df3, df4, df5])
combined_imgs = image0.append([image1, image2, image3, image4, image5])
combined_df = combined_df.reset_index(drop=True)
combined_imgs = combined_imgs.reset_index(drop=True)
total_records = [df0.shape[0], df1.shape[0], df2.shape[0], df3.shape[0], df4.shape[0], df5.shape[0]]
image_records = [image0.shape[0], image1.shape[0], image2.shape[0], image3.shape[0], image4.shape[0], image5.shape[0]]
print('Total Records: ',sum(total_records))
print('Total Images: ',sum(image_records))
return combined_df, combined_imgs
def zip_lookup(zip_code):
'''
Find city and state from zip code.
INPUT: zip code
OUTPUT: Returns city and state
'''
geolocator = Nominatim()
location = geolocator.geocode(zip_code)
city = location.address.split(',')[0].strip()
state = location.address.split(',')[1].strip()
return city, state
def gps_lookup(gps):
'''
Find city and state from GPS coordinates.
INPUT: zip code
OUTPUT: Returns city and state
'''
geolocator = Nominatim()
location = geolocator.geocode(gps)
city = location.address.split(',')[0].strip()
state = location.address.split(',')[1].strip()
return city, state
def rotate_image(filepath):
'''Phones rotate images by changing exif data,
but we really need to rotate them for processing'''
    image = PIL.Image.open(filepath)
    try:
        for orientation in TAGS.keys():
            if TAGS[orientation] == 'Orientation':
break
exif=dict(image._getexif().items())
if exif[orientation] == 3:
print('ROTATING 180')
image=image.rotate(180, expand=True)
elif exif[orientation] == 6:
print('ROTATING 270')
image=image.rotate(270, expand=True)
elif exif[orientation] == 8:
print('ROTATING 90')
image=image.rotate(90, expand=True)
image.save(filepath)
image.close()
except (AttributeError, KeyError, IndexError):
# cases: image don't have getexif
pass
return(image)
def vectorize_dog_images(image_path_list, length=25):
'''
Take collection of dog images and vectorize each image to a 1D NumPy array.
INPUT: List, Pandas Series, some iterable of filepaths to dog images (strings)
    OUTPUT: Returns a list of feature arrays (one per image)
'''
start = time.time()
feature_array_list = []
#image_path_list formerly combined_df.ImageUrl[0:4750]
for url in image_path_list[0:length]:
image_path = 'data/images/'+url.split('/')[-1]
dog = load_img(image_path, target_size=(224, 224))
numpy_image = img_to_array(dog)
image_batch = np.expand_dims(numpy_image, axis=0)
processed_image = vgg16.preprocess_input(image_batch.copy())
feature_array = model.predict(processed_image)
feature_array_list.append(feature_array)
#doggie = np.asarray(feature_array_list)
#np.save('data/RG_features', doggie)
end = time.time()
total_time = end-start
print('Total Time: '+str(total_time))
print('All dog features vectorized!')
return feature_array_list
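# The global `model` used above is not defined in this snippet. A common choice
# (an assumption here, not necessarily what the original project used) is a
# VGG16 network truncated at the fc2 layer, which yields a 4096-d feature
# vector per image:
def _build_feature_extractor():
    base = vgg16.VGG16(weights='imagenet', include_top=True)
    return Model(inputs=base.input, outputs=base.get_layer('fc2').output)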
def similarity(user_image, feature_array_collection):
'''
Calculate cosine similarity between user submitted image and entire adoptable dog corpus
INPUT: User submitted image in form of feature vector (NumPy Array, 1D x 4096 features)
    OUTPUT: Returns list of cosine distances (0 = identical) between user submitted image and entire adoptable dog corpus
'''
results = []
for feature_array in feature_array_collection:
results.append(distance.cosine(user_image.flatten(),feature_array.flatten()))
#print('Max Similarity Score = ' +str(max(results))
#similar_images=pd.DataFrame({'imgfile':images,'simscore':sims})
return results
def top_matches(results, imageURLs, num_top_matches):
'''
Creates zipped list of image files and similarity scores.
INPUT: Similarity scores (list), imageURLs (Pandas Series), top_matches (int)
OUTPUT: Returns similarity scores and images urls (Pandas Dataframe)
'''
zipped_dogs = list(zip(imageURLs.tolist(),results))
sorted_zipped_dogs = sorted(zipped_dogs, key = lambda t: t[1])
#num_top_matches=10
return sorted_zipped_dogs[0:num_top_matches]
#return pd.DataFrame({'Image_URLs':sorted_zipped_dogs[0],'Similarity_Score':sorted_zipped_dogs[1]})
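# Toy example (sketch) of the ranking above: identical vectors get cosine
# distance 0.0, orthogonal vectors get 1.0, and top_matches() sorts ascending
# so the most similar dogs come first.
def _example_similarity():
    user_vec = np.array([1.0, 0.0, 1.0])
    corpus = [np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0, 0.0])]
    scores = similarity(user_vec, corpus)                 # -> [0.0, 1.0]
    urls = pd.Series(['dog_a.jpg', 'dog_b.jpg'])
    return top_matches(scores, urls, num_top_matches=1)   # [('dog_a.jpg', 0.0)]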
def find_matches(pred, collection_features, images):
pred = pred.flatten()
nimages = len(collection_features)
#vectorize cosine similarity
#sims= inner(pred,collection_features)/norm(pred)/norm(collection_features,axis=1)
sims = []
for i in range(0,nimages):
sims.append(distance.cosine(pred.flatten(),collection_features[i].flatten()))
print('max sim = ' +str(max(sims)))
similar_images= | pd.DataFrame({'imgfile':images,'simscore':sims}) | pandas.DataFrame |
import math
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import json
with open('prescraped/artist_result.csv') as c:
table = pd.read_csv(c, header=None)
popular = table[table.iloc[:, 4] >= 65]
candidates = table[table.iloc[:,4]<65]
popular_ids = set()
for pid in popular.iloc[:,0]:
popular_ids.add(pid)
candidates_ids = set()
for cid in candidates.iloc[:,0]:
candidates_ids.add(cid)
means_cols = []
for i in range(5,table.shape[1],2):
means_cols.append(i)
artist_info = {}
genres = set()
for i, row in table.iterrows():
#both = np.array(row.iloc[5:])
means = []
for col in means_cols:
means.append(row.iloc[col])
artist_genres = []
for g in row.iloc[2].replace('[', '').replace(']','').replace("'", "").split(','):
genres.add(g.strip())
artist_genres.append(g.strip())
artist_info[row.iloc[0]] = {'name': row.iloc[1], 'followers': int(row.iloc[3]),
'means': means, 'genres': artist_genres}
data_means = table.iloc[:,means_cols]
#data_both = table.iloc[:,5:]
num_clust = math.floor(popular.shape[0]/2)
means_clusters = KMeans(n_clusters=num_clust, init='k-means++').fit(data_means)
for i, row in table.iterrows():
artist_info[row.iloc[0]]['cluster'] = means_clusters.labels_[i].item()
df_artists_clusters = | pd.DataFrame(columns=['id', 'cluster']) | pandas.DataFrame |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
from datetime import datetime as dt, timezone, timedelta, date
import time
import numpy as np
import pandas as pd
import pymongo
try:
import QUANTAXIS as QA
except:
print('PLEASE run "pip install QUANTAXIS" before call GolemQ.fetch.StockCN_realtime modules')
pass
try:
from utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST
)
except:
class AKA():
"""
        Constants for commonly used field names; defining them as constants
        avoids typos caused by writing raw strings directly.
        """
        # Candlestick (OHLCV) fields
CODE = 'code'
NAME = 'name'
OPEN = 'open'
HIGH = 'high'
LOW = 'low'
CLOSE = 'close'
VOLUME = 'volume'
VOL = 'vol'
DATETIME = 'datetime'
LAST_CLOSE = 'last_close'
PRE_CLOSE = 'pre_close'
def __setattr__(self, name, value):
raise Exception(u'Const Class can\'t allow to change property\' value.')
return super().__setattr__(name, value)
from QUANTAXIS.QAUtil import (
QASETTING,
)
client = QASETTING.client['QAREALTIME']
from utils.symbol import (
normalize_code
)
def GQ_fetch_stock_realtime_adv(code=None,
num=1,
collections=client.get_collection('realtime_{}'.format(date.today())),
verbose=True,
suffix=False,):
'''
    Return today's five-level bid/ask quotes. `code` can be a single stock code
    or a list; `num` is the number of records fetched per stock.
    :param code:
    :param num:
    :param collections: realtime_XXXX-XX-XX, one realtime collection per day
    :param suffix: whether the stock codes carry the SH/SZ exchange suffix
    :return: DataFrame
'''
if code is not None:
        # code must be converted to a list for the DB query; the level-2 quote collection stores stocks, indices and funds together, so normalized codes are mandatory
if isinstance(code, str):
code = [normalize_code(code)]
elif isinstance(code, list):
code = [normalize_code(symbol) for symbol in code]
pass
else:
print("QA Error GQ_fetch_stock_realtime_adv parameter code is not List type or String type")
#print(verbose, code)
items_from_collections = [
item for item in collections.find({'code': {
'$in': code
}},
limit=num * len(code),
sort=[('datetime',
pymongo.DESCENDING)])
]
if (items_from_collections is None) or \
(len(items_from_collections) == 0):
if verbose:
print("QA Error GQ_fetch_stock_realtime_adv find parameter code={} num={} collection={} return NOne"
.format(code,
num,
collections))
return
data = pd.DataFrame(items_from_collections)
if (suffix == False):
            # whether the returned code column should keep the exchange suffix
data['code'] = data.apply(lambda x: x.at['code'][:6], axis=1)
data_set_index = data.set_index(['datetime',
'code'],
drop=False).drop(['_id'],
axis=1)
return data_set_index
else:
print("QA Error GQ_fetch_stock_realtime_adv parameter code is None")
def GQ_fetch_stock_day_realtime_adv(codelist,
data_day,
verbose=True):
"""
    Fetch realtime daily-bar data; supports querying multiple stocks.
"""
if codelist is not None:
        # codelist must be converted to a list for the DB query
if isinstance(codelist, str):
codelist = [codelist]
elif isinstance(codelist, list):
pass
else:
print("QA Error GQ_fetch_stock_day_realtime_adv parameter codelist is not List type or String type")
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
if ((dt.now() > start_time) and ((dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=10))) or \
((dt.now() < start_time) and ((dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=40))):
if (verbose == True):
print('时间戳差距超过:', dt.now() - data_day.data.index.get_level_values(level=0)[-1].to_pydatetime(),
'尝试查找实盘数据....', codelist)
#print(codelist, verbose)
try:
if (dt.now() > start_time):
collections = client.get_collection('realtime_{}'.format(date.today()))
else:
collections = client.get_collection('realtime_{}'.format(date.today() - timedelta(hours=24)))
data_realtime = GQ_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose, suffix=False, collections=collections)
except:
data_realtime = QA.QA_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose)
if (data_realtime is not None) and \
(len(data_realtime) > 0):
            # merge in the realtime intraday data
data_realtime = data_realtime.drop_duplicates((["datetime",
'code'])).set_index(["datetime",
'code'],
drop=False)
data_realtime = data_realtime.reset_index(level=[1], drop=True)
data_realtime['date'] = pd.to_datetime(data_realtime['datetime']).dt.strftime('%Y-%m-%d')
data_realtime['datetime'] = pd.to_datetime(data_realtime['datetime'])
for code in codelist:
                # also check the history length; drop symbols with fewer than 30 days of data (new listings or insufficient data)
if (len(data_day.select_code(code[:6])) < 30):
print(u'{} 行情只有{}天数据,新股或者数据不足,不进行择时分析。'.format(code,
len(data_day.select_code(code))))
data_day.data.drop(data_day.select_code(code), inplace=True)
continue
                # *** Note: QA_data_tick_resample_1min does not support multiple symbols, so each code is handled in this loop ***
data_realtime_code = data_realtime[data_realtime['code'].eq(code)]
if (len(data_realtime_code) > 0):
data_realtime_code = data_realtime_code.set_index(['datetime']).sort_index()
if ('volume' in data_realtime_code.columns) and \
('vol' not in data_realtime_code.columns):
                        # the L1 feeds (Sina, pytdx) are inconsistent about 'vol' vs 'volume', hence this back-and-forth renaming
data_realtime_code.rename(columns={"volume": "vol"},
inplace = True)
elif ('volume' in data_realtime_code.columns):
data_realtime_code['vol'] = np.where(np.isnan(data_realtime_code['vol']),
data_realtime_code['volume'],
data_realtime_code['vol'])
                    # resampled into 1-minute bars
#data_realtime_1min =
#QA.QA_data_tick_resample_1min(data_realtime_code,
# type_='1min')
try:
data_realtime_1min = QA.QA_data_tick_resample_1min(data_realtime_code,
type_='1min')
except:
print('fooo1', code)
print(data_realtime_code)
raise('foooo1{}'.format(code))
data_realtime_1day = QA.QA_data_min_to_day(data_realtime_1min)
if (len(data_realtime_1day) > 0):
                        # convert to daily bars
data_realtime_1day.rename(columns={"vol": "volume"},
inplace = True)
                        # pretend the prices are adjusted; adjustment days strongly affect buy/sell signals, so ideally handle them explicitly
data_realtime_1day['adj'] = 1.0
data_realtime_1day['datetime'] = pd.to_datetime(data_realtime_1day.index)
data_realtime_1day = data_realtime_1day.set_index(['datetime', 'code'],
drop=True).sort_index()
                        # issue: the volume calculation is off by a large margin; this is an attempted workaround but it still looks wrong
data_realtime_1day[AKA.VOLUME] = data_realtime_1min[AKA.VOLUME][-1] / data_realtime_1min[AKA.CLOSE][-1]
# if (len(data_realtime_1day) > 0):
# print(u'日线 status:',
# data_day.data.index.get_level_values(level=0)[-1]
# ==
# data_realtime_1day.index.get_level_values(level=0)[-1],
# '时间戳差距超过:', dt.now() -
# data_day.data.index.get_level_values(level=0)[-1].to_pydatetime(),
#'尝试查找实盘数据....', codelist)
# print(data_day.data.tail(3), data_realtime_1day)
if (data_day.data.index.get_level_values(level=0)[-1] != data_realtime_1day.index.get_level_values(level=0)[-1]):
if (verbose == True):
print(u'追加实时实盘数据,股票代码:{} 时间:{} 价格:{}'.format(data_realtime_1day.index[0][1],
data_realtime_1day.index[-1][0],
data_realtime_1day[AKA.CLOSE][-1]))
data_day.data = data_day.data.append(data_realtime_1day,
sort=True)
return data_day
def GQ_fetch_stock_min_realtime_adv(codelist,
data_min,
frequency,
verbose=True):
"""
    Fetch realtime hourly/minute-bar data for the given A-share symbols.
"""
if codelist is not None:
        # codelist must be converted to a list for the DB query
if isinstance(codelist, str):
codelist = [codelist]
elif isinstance(codelist, list):
pass
else:
if verbose:
print("QA Error GQ_fetch_stock_min_realtime_adv parameter codelist is not List type or String type")
if data_min is None:
if verbose:
print(u'代码:{} 今天停牌或者已经退市*'.format(codelist))
return None
try:
foo = (dt.now() - data_min.data.index.get_level_values(level=0)[-1].to_pydatetime())
except:
if verbose:
print(u'代码:{} 今天停牌或者已经退市**'.format(codelist))
return None
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
if ((dt.now() > start_time) and ((dt.now() - data_min.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=10))) or \
((dt.now() < start_time) and ((dt.now() - data_min.data.index.get_level_values(level=0)[-1].to_pydatetime()) > timedelta(hours=24))):
if (verbose == True):
print('时间戳差距超过:', dt.now() - data_min.data.index.get_level_values(level=0)[-1].to_pydatetime(),
'尝试查找实盘数据....', codelist)
#print(codelist, verbose)
try:
if (dt.now() > start_time):
collections = client.get_collection('realtime_{}'.format(date.today()))
else:
collections = client.get_collection('realtime_{}'.format(date.today() - timedelta(hours=24)))
data_realtime = GQ_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose, suffix=False, collections=collections)
except:
data_realtime = QA.QA_fetch_stock_realtime_adv(codelist, num=8000, verbose=verbose)
if (data_realtime is not None) and \
(len(data_realtime) > 0):
# 合并实盘实时数据
data_realtime = data_realtime.drop_duplicates((["datetime",
'code'])).set_index(["datetime",
'code'],
drop=False)
data_realtime = data_realtime.reset_index(level=[1], drop=True)
data_realtime['date'] = pd.to_datetime(data_realtime['datetime']).dt.strftime('%Y-%m-%d')
data_realtime['datetime'] = | pd.to_datetime(data_realtime['datetime']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
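# Worked example (illustrative only) of the sliding-window reshaping above:
# with look_back=2, each sample holds two consecutive values and the target is
# the value that follows them (note the loop drops the final possible window).
def _example_create_dataset():
    data = numpy.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
    X, Y = create_dataset(data, look_back=2)
    # X -> [[1. 2.], [2. 3.]]   Y -> [3. 4.]
    return X, Y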
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
            res[j] = predicted[j] / numpy.mean(actual)
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test= | pd.DataFrame(testX) | pandas.DataFrame |
"""Contains the code for ICAPAI'21 paper "Counterfactual Explanations for Multivariate Time Series"
Authors:
<NAME> (1), <NAME> (1), <NAME> (2), <NAME> (1)
Affiliations:
(1) Department of Electrical and Computer Engineering, Boston University
(2) Sandia National Laboratories
This work has been partially funded by Sandia National Laboratories. Sandia
National Laboratories is a multimission laboratory managed and operated by
National Technology and Engineering Solutions of Sandia, LLC., a wholly owned
subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy’s National Nuclear Security Administration under Contract DENA0003525.
"""
import multiprocessing
import logging
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from fast_features import generate_features
_TIMESERIES = None
class CheckFeatures(BaseEstimator, TransformerMixin):
"""
Wrapper class that checks if the features used to train the classifier
is the same as the features used to predict results
"""
def __init__(self):
pass
def fit(self, x, y=None):
"""
        Stores the column names of all the generated features
in the form of a list in a member variable. All names
are represented by str.
x = training data represented as a Pandas DataFrame
y = training labels (not used in this class)
"""
self.column_names = list(x.columns)
if not self.column_names:
logging.warning("Training data has no columns.")
return self
def transform(self, x, y=None):
"""
Checks that the names of all the generated features from
training and testing are the same. Prints an error if not
and returns the dataframe provided in the arugment if so.
x = testing data/data to compare with training data
y = training labels (not used in this class)
"""
argu_columns = list(x.columns)
assert (
self.column_names == argu_columns
), "Features of data from training doesn't match that of testing"
return x
class TSFeatureGenerator(BaseEstimator, TransformerMixin):
"""Wrapper class for time series feature generation"""
def __init__(self, trim=60, threads=multiprocessing.cpu_count(),
data_path=None,
features=['max', 'min', 'mean', 'std', 'skew', 'kurt',
'perc05', 'perc25', 'perc50', 'perc75', 'perc95']):
self.features = features
self.trim = trim
self.threads = threads
self.data_path = data_path
def fit(self, x, y=None):
"""Extracts features
x = training data represented as a Pandas DataFrame
y = training labels (not used in this class)
"""
return self
def transform(self, x, y=None):
"""Extracts features
x = testing data/data to compare with training data
y = training labels (not used in this class)
"""
global _TIMESERIES
_TIMESERIES = x
use_pool = self.threads != 1 and x.size > 100000
if use_pool:
pool = multiprocessing.Pool(self.threads)
extractor = _FeatureExtractor(
features=self.features, data_path=self.data_path,
window_size=0, trim=self.trim)
if isinstance(x, pd.DataFrame):
index_name = 'node_id' if 'node_id' in x.index.names else 'nodeID'
if not use_pool:
result = [extractor(node_id)
for node_id in
x.index.get_level_values(index_name).unique()]
else:
result = pool.map(
extractor,
x.index.get_level_values(index_name).unique())
pool.close()
pool.join()
result = pd.concat(result)
else:
# numpy array format compatible with Burak's notebooks
if not use_pool:
result = [extractor(i) for i in range(len(x))]
else:
result = pool.map(extractor, range(len(x)))
pool.close()
pool.join()
result = np.concatenate(result, axis=0)
return result
def _get_features(node_id, features=None, data_path=None, trim=60, **kwargs):
global _TIMESERIES
assert (
features == ['max', 'min', 'mean', 'std', 'skew', 'kurt',
'perc05', 'perc25', 'perc50', 'perc75', 'perc95']
)
if data_path is not None:
try:
data = pd.read_hdf(
data_path + '/table_{}.hdf'.format(node_id[-1]), node_id)
except KeyError:
data = _TIMESERIES.loc[node_id, :, :]
if len(data) < trim * 2:
return pd.DataFrame()
return pd.DataFrame(
generate_features(
np.asarray(data.values.astype('float'), order='C'),
trim
).reshape((1, len(data.columns) * 11)),
index=[node_id],
columns=[feature + '_' + metric
for metric in data.columns
for feature in features])
elif isinstance(_TIMESERIES, pd.DataFrame):
data = np.asarray(
_TIMESERIES.loc[node_id, :, :].values.astype('float'),
order='C')
if len(data) < trim * 2:
            return pd.DataFrame()
import os
import numpy as np
import pandas as pd
import json
import lib.galaxy_utilities as gu
from astropy.io import fits
from tqdm import tqdm
aggregated_models = pd.read_pickle('lib/models.pickle')['tuned_aggregate']
def get_n_arms(gal):
keys = (
't11_arms_number_a31_1_debiased',
't11_arms_number_a32_2_debiased',
't11_arms_number_a33_3_debiased',
't11_arms_number_a34_4_debiased',
't11_arms_number_a36_more_than_4_debiased',
)
return sum((i + 1) * gal[k] for i, k in enumerate(keys))
def get_winding_score(gal):
keys = (
't10_arms_winding_a28_tight_debiased',
't10_arms_winding_a29_medium_debiased',
't10_arms_winding_a30_loose_debiased',
)
return sum((i + 1) * gal[k] for i, k in enumerate(keys))
def get_pitch_angle(gal):
m = get_n_arms(gal)
w = get_winding_score(gal)
return 6.37 * w + 1.3 * m + 4.34
def has_comp(annotation, comp=0):
try:
drawn_shapes = annotation[comp]['value'][0]['value']
return len(drawn_shapes) > 0
except (IndexError, KeyError):
return False
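# Worked example (not part of the original script): the pitch-angle estimate
# combines debiased GZ2 vote fractions for arm number (m) and winding (w) as
# 6.37 * w + 1.3 * m + 4.34. The vote fractions below are made up for illustration.
def _example_pitch_angle():
    gal = {
        't11_arms_number_a31_1_debiased': 0.1,
        't11_arms_number_a32_2_debiased': 0.6,
        't11_arms_number_a33_3_debiased': 0.2,
        't11_arms_number_a34_4_debiased': 0.05,
        't11_arms_number_a36_more_than_4_debiased': 0.05,
        't10_arms_winding_a28_tight_debiased': 0.3,
        't10_arms_winding_a29_medium_debiased': 0.5,
        't10_arms_winding_a30_loose_debiased': 0.2,
    }
    # m = 2.35, w = 1.9, so the estimate is 6.37*1.9 + 1.3*2.35 + 4.34, roughly 19.5
    return get_pitch_angle(gal)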
if __name__ == '__main__':
loc = os.path.abspath(os.path.dirname(__file__))
# open the GZ2 catalogue
NSA_GZ = fits.open(os.path.join(loc, '../source_files/NSA_GalaxyZoo.fits'))
sid_list_loc = os.path.join(loc, 'lib/subject-id-list.csv')
sid_list = pd.read_csv(sid_list_loc).values[:, 0]
    gz2_quants = pd.DataFrame([], columns=('hart_pa', 'winding', 'n_arms'))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
import dataclasses
import hashlib
import json
import logging
import re
from contextlib import closing
from datetime import datetime
from typing import (
Any,
Callable,
Dict,
List,
Match,
NamedTuple,
Optional,
Pattern,
Tuple,
TYPE_CHECKING,
Union,
)
import pandas as pd
import sqlparse
from flask import g
from flask_babel import lazy_gettext as _
from sqlalchemy import column, DateTime, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.interfaces import Compiled, Dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import URL
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.orm import Session
from sqlalchemy.sql import quoted_name, text
from sqlalchemy.sql.expression import ColumnClause, ColumnElement, Select, TextAsFrom
from sqlalchemy.types import TypeEngine
from superset import app, sql_parse
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.models.sql_lab import Query
from superset.sql_parse import Table
from superset.utils import core as utils
if TYPE_CHECKING:
# prevent circular imports
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
logger = logging.getLogger()
class TimeGrain(NamedTuple): # pylint: disable=too-few-public-methods
name: str # TODO: redundant field, remove
label: str
function: str
duration: Optional[str]
QueryStatus = utils.QueryStatus
config = app.config
builtin_time_grains: Dict[Optional[str], str] = {
None: "Time Column",
"PT1S": "second",
"PT1M": "minute",
"PT5M": "5 minute",
"PT10M": "10 minute",
"PT15M": "15 minute",
"PT0.5H": "half hour",
"PT1H": "hour",
"P1D": "day",
"P1W": "week",
"P1M": "month",
"P0.25Y": "quarter",
"P1Y": "year",
"1969-12-28T00:00:00Z/P1W": "week_start_sunday",
"1969-12-29T00:00:00Z/P1W": "week_start_monday",
"P1W/1970-01-03T00:00:00Z": "week_ending_saturday",
"P1W/1970-01-04T00:00:00Z": "week_ending_sunday",
}
class TimestampExpression(
ColumnClause
): # pylint: disable=abstract-method,too-many-ancestors,too-few-public-methods
def __init__(self, expr: str, col: ColumnClause, **kwargs: Any) -> None:
"""Sqlalchemy class that can be can be used to render native column elements
respeting engine-specific quoting rules as part of a string-based expression.
:param expr: Sql expression with '{col}' denoting the locations where the col
object will be rendered.
:param col: the target column
"""
super().__init__(expr, **kwargs)
self.col = col
@property
def _constructor(self) -> ColumnClause:
# Needed to ensure that the column label is rendered correctly when
# proxied to the outer query.
# See https://github.com/sqlalchemy/sqlalchemy/issues/4730
return ColumnClause
@compiles(TimestampExpression)
def compile_timegrain_expression(
element: TimestampExpression, compiler: Compiled, **kwargs: Any
) -> str:
return element.name.replace("{col}", compiler.process(element.col, **kwargs))
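# Illustrative sketch (not part of Superset): the compiler hook above boils down
# to substituting the '{col}' placeholder in a time-grain template with the
# compiled column expression. The template and quoted column below are
# hypothetical stand-ins for what an engine spec and compiler would produce.
def _example_timegrain_template_substitution():
    template = "DATE_TRUNC('day', {col})"
    compiled_col = '"ds"'
    return template.replace("{col}", compiled_col)  # -> DATE_TRUNC('day', "ds")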
class LimitMethod: # pylint: disable=too-few-public-methods
"""Enum the ways that limits can be applied"""
FETCH_MANY = "fetch_many"
WRAP_SQL = "wrap_sql"
FORCE_LIMIT = "force_limit"
class BaseEngineSpec: # pylint: disable=too-many-public-methods
"""Abstract class for database engine specific configurations"""
engine = "base" # str as defined in sqlalchemy.engine.engine
engine_aliases: Optional[Tuple[str]] = None
engine_name: Optional[
str
] = None # used for user messages, overridden in child classes
_date_trunc_functions: Dict[str, str] = {}
_time_grain_expressions: Dict[Optional[str], str] = {}
column_type_mappings: Tuple[
Tuple[Pattern[str], Union[TypeEngine, Callable[[Match[str]], TypeEngine]]], ...,
] = ()
time_groupby_inline = False
limit_method = LimitMethod.FORCE_LIMIT
time_secondary_columns = False
allows_joins = True
allows_subqueries = True
allows_column_aliases = True
force_column_alias_quotes = False
arraysize = 0
max_column_name_length = 0
try_remove_schema_from_table_name = True # pylint: disable=invalid-name
# default matching patterns for identifying column types
db_column_types: Dict[utils.DbColumnType, Tuple[Pattern[Any], ...]] = {
utils.DbColumnType.NUMERIC: (
re.compile(r"BIT", re.IGNORECASE),
re.compile(r".*DOUBLE.*", re.IGNORECASE),
re.compile(r".*FLOAT.*", re.IGNORECASE),
re.compile(r".*INT.*", re.IGNORECASE),
re.compile(r".*NUMBER.*", re.IGNORECASE),
re.compile(r".*LONG$", re.IGNORECASE),
re.compile(r".*REAL.*", re.IGNORECASE),
re.compile(r".*NUMERIC.*", re.IGNORECASE),
re.compile(r".*DECIMAL.*", re.IGNORECASE),
re.compile(r".*MONEY.*", re.IGNORECASE),
),
utils.DbColumnType.STRING: (
re.compile(r".*CHAR.*", re.IGNORECASE),
re.compile(r".*STRING.*", re.IGNORECASE),
re.compile(r".*TEXT.*", re.IGNORECASE),
),
utils.DbColumnType.TEMPORAL: (
re.compile(r".*DATE.*", re.IGNORECASE),
re.compile(r".*TIME.*", re.IGNORECASE),
),
}
@classmethod
def is_db_column_type_match(
cls, db_column_type: Optional[str], target_column_type: utils.DbColumnType
) -> bool:
"""
Check if a column type satisfies a pattern in a collection of regexes found in
`db_column_types`. For example, if `db_column_type == "NVARCHAR"`,
it would be a match for "STRING" due to being a match for the regex ".*CHAR.*".
:param db_column_type: Column type to evaluate
:param target_column_type: The target type to evaluate for
:return: `True` if a `db_column_type` matches any pattern corresponding to
`target_column_type`
"""
if not db_column_type:
return False
patterns = cls.db_column_types[target_column_type]
return any(pattern.match(db_column_type) for pattern in patterns)
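    # Example (illustrative, mirroring the docstring above): a column reported as
    # "NVARCHAR" matches the ".*CHAR.*" pattern, so
    # is_db_column_type_match("NVARCHAR", utils.DbColumnType.STRING) is True,
    # while a None/empty db_column_type short-circuits to False.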
@classmethod
def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
return False
@classmethod
def get_engine(
cls,
database: "Database",
schema: Optional[str] = None,
source: Optional[str] = None,
) -> Engine:
user_name = utils.get_username()
return database.get_sqla_engine(
schema=schema, nullpool=True, user_name=user_name, source=source
)
@classmethod
def get_timestamp_expr(
cls,
col: ColumnClause,
pdf: Optional[str],
time_grain: Optional[str],
type_: Optional[str] = None,
) -> TimestampExpression:
"""
Construct a TimestampExpression to be used in a SQLAlchemy query.
:param col: Target column for the TimestampExpression
:param pdf: date format (seconds or milliseconds)
:param time_grain: time grain, e.g. P1Y for 1 year
:param type_: the source column type
:return: TimestampExpression object
"""
if time_grain:
time_expr = cls.get_time_grain_expressions().get(time_grain)
if not time_expr:
raise NotImplementedError(
f"No grain spec for {time_grain} for database {cls.engine}"
)
if type_ and "{func}" in time_expr:
date_trunc_function = cls._date_trunc_functions.get(type_)
if date_trunc_function:
time_expr = time_expr.replace("{func}", date_trunc_function)
else:
time_expr = "{col}"
# if epoch, translate to DATE using db specific conf
if pdf == "epoch_s":
time_expr = time_expr.replace("{col}", cls.epoch_to_dttm())
elif pdf == "epoch_ms":
time_expr = time_expr.replace("{col}", cls.epoch_ms_to_dttm())
return TimestampExpression(time_expr, col, type_=DateTime)
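    # Example walk-through (illustrative): with a grain expression such as
    # "DATE({col})" and pdf == "epoch_s", the template first becomes e.g.
    # "DATE(FROM_UNIXTIME({col}))" for an engine whose epoch_to_dttm() is
    # "FROM_UNIXTIME({col})"; the remaining '{col}' placeholder is filled in at
    # compile time by compile_timegrain_expression above.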
@classmethod
def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
"""
Generate a tuple of supported time grains.
:return: All time grains supported by the engine
"""
ret_list = []
time_grains = builtin_time_grains.copy()
time_grains.update(config["TIME_GRAIN_ADDONS"])
for duration, func in cls.get_time_grain_expressions().items():
if duration in time_grains:
name = time_grains[duration]
ret_list.append(TimeGrain(name, _(name), func, duration))
return tuple(ret_list)
@classmethod
def get_time_grain_expressions(cls) -> Dict[Optional[str], str]:
"""
Return a dict of all supported time grains including any potential added grains
but excluding any potentially disabled grains in the config file.
:return: All time grain expressions supported by the engine
"""
# TODO: use @memoize decorator or similar to avoid recomputation on every call
time_grain_expressions = cls._time_grain_expressions.copy()
grain_addon_expressions = config["TIME_GRAIN_ADDON_EXPRESSIONS"]
time_grain_expressions.update(grain_addon_expressions.get(cls.engine, {}))
denylist: List[str] = config["TIME_GRAIN_DENYLIST"]
for key in denylist:
time_grain_expressions.pop(key)
return time_grain_expressions
@classmethod
def make_select_compatible(
cls, groupby_exprs: Dict[str, ColumnElement], select_exprs: List[ColumnElement]
) -> List[ColumnElement]:
"""
Some databases will just return the group-by field into the select, but don't
allow the group-by field to be put into the select list.
:param groupby_exprs: mapping between column name and column object
:param select_exprs: all columns in the select clause
:return: columns to be included in the final select clause
"""
return select_exprs
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
"""
:param cursor: Cursor instance
:param limit: Maximum number of rows to be returned by the cursor
:return: Result of query
"""
if cls.arraysize:
cursor.arraysize = cls.arraysize
if cls.limit_method == LimitMethod.FETCH_MANY and limit:
return cursor.fetchmany(limit)
return cursor.fetchall()
@classmethod
def expand_data(
cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
"""
Some engines support expanding nested fields. See implementation in Presto
spec for details.
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
return columns, data, []
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
"""Allow altering default column attributes when first detected/added
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created"""
# TODO: Fix circular import caused by importing TableColumn
@classmethod
def epoch_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (seconds) to datetime that can be used in a
query. The reference column should be denoted as `{col}` in the return
expression, e.g. "FROM_UNIXTIME({col})"
:return: SQL Expression
"""
raise NotImplementedError()
@classmethod
def epoch_ms_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (milliseconds) to datetime that can be used
in a query.
:return: SQL Expression
"""
return cls.epoch_to_dttm().replace("{col}", "({col}/1000)")
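    # Example (illustrative): for an engine whose epoch_to_dttm() returns
    # "FROM_UNIXTIME({col})", this yields "FROM_UNIXTIME(({col}/1000))", i.e. the
    # millisecond column is divided by 1000 before the seconds-based conversion.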
@classmethod
def get_datatype(cls, type_code: Any) -> Optional[str]:
"""
Change column type code from cursor description to string representation.
:param type_code: Type code from cursor description
:return: String representation of type code
"""
if isinstance(type_code, str) and type_code != "":
return type_code.upper()
return None
@classmethod
def normalize_indexes(cls, indexes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Normalizes indexes for more consistency across db engines
noop by default
:param indexes: Raw indexes as returned by SQLAlchemy
:return: cleaner, more aligned index definition
"""
return indexes
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
"""
Returns engine-specific table metadata
:param database: Database instance
:param table_name: Table name
:param schema_name: Schema name
:return: Engine-specific table metadata
"""
# TODO: Fix circular import caused by importing Database
return {}
@classmethod
def apply_limit_to_sql(cls, sql: str, limit: int, database: "Database") -> str:
"""
Alters the SQL statement to apply a LIMIT clause
:param sql: SQL query
:param limit: Maximum number of rows to be returned by the query
:param database: Database instance
:return: SQL query with limit clause
"""
# TODO: Fix circular import caused by importing Database
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip("\t\n ;")
qry = (
select("*")
.select_from(TextAsFrom(text(sql), ["*"]).alias("inner_qry"))
.limit(limit)
)
return database.compile_sqla_query(qry)
        if cls.limit_method == LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.set_or_update_query_limit(limit)
return sql
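    # Example (illustrative): with the default FORCE_LIMIT method, a query like
    # "SELECT * FROM some_table" gets a "LIMIT 100" clause added (or an existing
    # LIMIT replaced) in place via sql_parse.ParsedQuery, instead of being wrapped
    # in an outer SELECT as the WRAP_SQL method does.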
@classmethod
def get_limit_from_sql(cls, sql: str) -> Optional[int]:
"""
Extract limit from SQL query
:param sql: SQL query
:return: Value of limit clause in query
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.limit
@classmethod
def set_or_update_query_limit(cls, sql: str, limit: int) -> str:
"""
Create a query based on original query but with new limit clause
:param sql: SQL query
:param limit: New limit to insert/replace into query
:return: Query with new limit
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.set_or_update_query_limit(limit)
@staticmethod
def csv_to_df(**kwargs: Any) -> pd.DataFrame:
""" Read csv into Pandas DataFrame
        :param kwargs: params to be passed to pandas.read_csv
:return: Pandas DataFrame containing data from csv
"""
kwargs["encoding"] = "utf-8"
kwargs["iterator"] = True
chunks = pd.read_csv(**kwargs)
df = pd.concat(chunk for chunk in chunks)
return df
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
""" Upload data from a Pandas DataFrame to a database. For
regular engines this calls the DataFrame.to_sql() method. Can be
overridden for engines that don't work well with to_sql(), e.g.
BigQuery.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_sql() method
"""
df.to_sql(**kwargs)
@classmethod
def create_table_from_csv( # pylint: disable=too-many-arguments
cls,
filename: str,
table: Table,
database: "Database",
csv_to_df_kwargs: Dict[str, Any],
df_to_sql_kwargs: Dict[str, Any],
) -> None:
"""
Create table from contents of a csv. Note: this method does not create
metadata for the table.
"""
df = cls.csv_to_df(filepath_or_buffer=filename, **csv_to_df_kwargs)
engine = cls.get_engine(database)
if table.schema:
            # only add schema when it is present and non-empty
df_to_sql_kwargs["schema"] = table.schema
if engine.dialect.supports_multivalues_insert:
df_to_sql_kwargs["method"] = "multi"
cls.df_to_sql(df=df, con=engine, **df_to_sql_kwargs)
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
"""
Convert Python datetime object to a SQL expression
:param target_type: The target type of expression
:param dttm: The datetime object
:return: The SQL expression
"""
return None
@classmethod
def create_table_from_excel( # pylint: disable=too-many-arguments
cls,
filename: str,
table: Table,
database: "Database",
excel_to_df_kwargs: Dict[str, Any],
df_to_sql_kwargs: Dict[str, Any],
) -> None:
"""
        Create table from the contents of an Excel file. Note: this method does not create
metadata for the table.
"""
        df = pd.read_excel(io=filename, **excel_to_df_kwargs)
# Command to run bokeh server
# bokeh serve --show example_data_visualization_with_bokeh.py
# Import the necessary modules
from bokeh.io import curdoc, show
from bokeh.models import ColumnDataSource, Slider, CategoricalColorMapper, HoverTool, Select
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from bokeh.layouts import widgetbox, row, column
from bokeh.models.widgets import Tabs, Panel, DataTable, TableColumn
#Import the Data
import pandas
data = pandas.read_csv("C:\\Users\\olive\\Documents\\Class\\Data Mining\\Bokeh\\airline_data.csv")
#!/usr/bin/python
"""functions to create the figures for publication
"""
import seaborn as sns
import math
import pyrtools as pt
import neuropythy as ny
import os.path as op
import warnings
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import pandas as pd
import re
import itertools
from sklearn import linear_model
from . import summary_plots
from . import analyze_model
from . import plotting
from . import model
from . import utils
from . import first_level_analysis
from . import style
def create_precision_df(paths, summary_func=np.mean,
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border'):
"""Create dataframe summarizing subjects' precision
When combining parameter estimates into an 'overall' value, we want
to use the precision of each subject's data. To do that, we take the
first level summary dfs (using regex to extract the subject,
session, and task from the path) and call `summary_func` on the
`precision` column. This gives us a single number, which we'll use
when performing the precision-weighted mean
df_filter_string can be used to filter the voxels we examine, so
that we look only at those voxels that the model was fit to
Parameters
----------
paths : list
list of strings giving the paths to the first level summary
dfs.
summary_func : callable, optional
function we use to summarize the precision. Must take an array
as its first input, not require any other inputs, and return a
single value
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
Returns
-------
df : pd.DataFrame
dataframe containing one row per (subject, session) pair, giving
the precision for that scanning session. used to weight
bootstraps
"""
regex_names = ['subject', 'session', 'task']
regexes = [r'(sub-[a-z0-9]+)', r'(ses-[a-z0-9]+)', r'(task-[a-z0-9]+)']
df = []
for p in paths:
tmp = pd.read_csv(p)
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
tmp = df_filter(tmp).reset_index()
val = summary_func(tmp.precision.values)
if hasattr(val, '__len__') and len(val) > 1:
raise Exception(f"summary_func {summary_func} returned more than one value!")
data = {'precision': val}
for n, regex in zip(regex_names, regexes):
res = re.findall(regex, p)
if len(set(res)) != 1:
raise Exception(f"Unable to infer {n} from path {p}!")
data[n] = res[0]
df.append(pd.DataFrame(data, [0]))
return pd.concat(df).reset_index(drop=True)
def existing_studies_df():
"""create df summarizing earlier studies
there have been a handful of studies looking into this, so we want
to summarize them for ease of reference. Each study is measuring
preferred spatial frequency at multiple eccentricities in V1 using
fMRI (though how exactly they determine the preferred SF and the
stimuli they use vary)
This dataframe contains the following columns:
- Paper: the reference for this line
- Eccentricity: the eccentricity (in degrees) that they measured
preferred spatial frequency at
- Preferred spatial frequency (cpd): the preferred spatial frequency
measured at this eccentricity (in cycles per degree)
- Preferred period (deg): the preferred period measured at this
eccentricity (in degrees per cycle); this is just the inverse of
the preferred spatial frequency
The eccentricity / preferred spatial frequency were often not
reported in a manner that allowed for easy extraction of the data,
so the values should all be taken as approximate, as they involve me
attempting to read values off of figures / colormaps.
Papers included (and their reference in the df):
- Sasaki (2001): <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., & <NAME>. (2001). Local and global
attention are mapped retinotopically in human occipital
cortex. Proceedings of the National Academy of Sciences, 98(4),
2077–2082.
- Henriksson (2008): <NAME>., <NAME>., Hyv\"arinen,
Aapo, & <NAME>. (2008). Spatial frequency tuning in human
retinotopic visual areas. Journal of Vision, 8(10),
5. http://dx.doi.org/10.1167/8.10.5
- Kay (2011): <NAME>. (2011). Understanding Visual Representation
By Developing Receptive-Field Models. Visual Population Codes:
Towards a Common Multivariate Framework for Cell Recording and
Functional Imaging, (), 133–162.
- Hess (dominant eye, 2009): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2009). Selectivity as well as
sensitivity loss characterizes the cortical spatial frequency
deficit in amblyopia. Human Brain Mapping, 30(12),
4054–4069. http://dx.doi.org/10.1002/hbm.20829 (this paper reports
spatial frequency separately for dominant and non-dominant eyes in
amblyopes, only the dominant eye is reported here)
- D'Souza (2016): <NAME>., <NAME>., <NAME>., Strasburger,
H., & <NAME>. (2016). Dependence of chromatic responses in v1
on visual field eccentricity and spatial frequency: an fmri
study. JOSA A, 33(3), 53–64.
- Farivar (2017): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2017). Non-uniform phase sensitivity
in spatial frequency maps of the human visual cortex. The Journal
of Physiology, 595(4),
1351–1363. http://dx.doi.org/10.1113/jp273206
- Olsson (pilot, model fit): line comes from a model created by <NAME> in the Winawer lab, fit to pilot data collected by
<NAME> (so note that this is not data). Never ended up
in a paper, but did show in a presentation at VSS 2017: <NAME>,
<NAME>, <NAME>, <NAME> (2017) An anatomically-defined
template of BOLD response in
V1-V3. J. Vis. 17(10):585. DOI:10.1167/17.10.585
Returns
-------
df : pd.DataFrame
Dataframe containing the optimum spatial frequency at multiple
eccentricities from the different papers
"""
data_dict = {
'Paper': ['Sasaki (2001)',]*7,
'Preferred spatial frequency (cpd)': [1.25, .9, .75, .7, .6, .5, .4],
'Eccentricity': [0, 1, 2, 3, 4, 5, 12]
}
data_dict['Paper'].extend(['Henriksson (2008)', ]*5)
data_dict['Preferred spatial frequency (cpd)'].extend([1.2, .68, .46, .40, .18])
data_dict['Eccentricity'].extend([1.7, 4.7, 6.3, 9, 19])
# This is only a single point, so we don't plot it
# data_dict['Paper'].extend(['Kay (2008)'])
# data_dict['Preferred spatial frequency (cpd)'].extend([4.5])
# data_dict['Eccentricity'].extend([ 2.9])
data_dict['Paper'].extend(['Kay (2011)']*5)
data_dict['Preferred spatial frequency (cpd)'].extend([4, 3, 10, 10, 2])
data_dict['Eccentricity'].extend([2.5, 4, .5, 1.5, 7])
data_dict['Paper'].extend(["Hess (dominant eye, 2009)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2.25, 1.9, 1.75])
data_dict['Eccentricity'].extend([2.5, 5, 10])
data_dict['Paper'].extend(["D'Souza (2016)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2, .95, .4])
data_dict['Eccentricity'].extend([1.4, 4.6, 9.8])
data_dict['Paper'].extend(['Farivar (2017)']*2)
data_dict['Preferred spatial frequency (cpd)'].extend([3, 1.5,])
data_dict['Eccentricity'].extend([.5, 3])
# model fit and never published, so don't include.
# data_dict['Paper'].extend(['Olsson (pilot, model fit)']*10)
# data_dict['Preferred spatial frequency (cpd)'].extend([2.11, 1.76, 1.47, 2.75, 1.24, 1.06, .88, .77, .66, .60])
# data_dict['Eccentricity'].extend([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])
# these values gotten using web plot digitizer and then rounded to 2
# decimal points
data_dict["Paper"].extend(['Aghajari (2020)']*9)
data_dict['Preferred spatial frequency (cpd)'].extend([2.24, 1.62, 1.26,
1.09, 0.88, 0.75,
0.78, 0.75, 0.70])
data_dict['Eccentricity'].extend([0.68, 1.78, 2.84, 3.90, 5.00, 6.06, 7.16,
8.22, 9.28])
# Predictions of the scaling hypothesis -- currently unused
# ecc = np.linspace(.01, 20, 50)
# fovea_cutoff = 0
# # two possibilities here
# V1_RF_size = np.concatenate([np.ones(len(ecc[ecc<fovea_cutoff])),
# np.linspace(1, 2.5, len(ecc[ecc>=fovea_cutoff]))])
# V1_RF_size = .2 * ecc
df = pd.DataFrame(data_dict)
df = df.sort_values(['Paper', 'Eccentricity'])
df["Preferred period (deg)"] = 1. / df['Preferred spatial frequency (cpd)']
return df
def _demean_df(df, y='cv_loss', extra_cols=[]):
"""demean a column of the dataframe
Calculate the mean of `y` across the values in the 'subject' and
'loss_func' columns, then demean `y` and return df with several new
columns:
- `demeaned_{y}`: each y with `{y}_mean` subtracted off
- `{y}_mean`: the average of y per subject per loss_func
- `{y}_mean_overall`: the average of `{y}_mean` per loss_func
- `remeaned_{y}`: the `demeaned_{y}` with `{y}_mean_overall` added
back to it
If you use this with the defaults, the overall goal of this is to
enable us to look at how the cv_loss varies across models, because
the biggest effect is the difference in cv_loss across
subjects. Demeaning the cv_loss on a subject-by-subject basis
enables us to put all the subjects together so we can look for
patterns across models. For example, we can then compute error bars
that only capture the variation across models, but not across
subjects. Both remeaned or demeaned will capture this, the question
is what values to have on the y-axis. If you use demeaned, you'll
have negative loss, which might be confusing. If you use remeaned,
the y-axis values will be the average across subjects, which might
be easier to interpret.
Parameters
----------
df : pd.DataFrame
dataframe to demean
y : str, optional
the column to demean
    extra_cols : list, optional
list of columns to de/remean using the mean from `y`. for
example, you might want to de/remean the noise_ceiling using the
mean from the cross-validation loss
Returns
-------
df : pd.DataFrame
dataframe with new, demeaned column
"""
gb_cols = ['subject', 'loss_func']
df = df.set_index(gb_cols)
y_mean = df.groupby(gb_cols)[y].mean()
df[f'{y}_mean'] = y_mean
# here we take the average over the averages. we do this so that we weight
# all of the groups the same. For example, if gb_cols=['subject'] and one
# subject had twice as many rows (because it had two sessions in df, for
# example), then this ensures that subject isn't twice as important when
# computing the mean (which would be the case if we used
# df[f'{y}_mean'].mean() instead). We do, however, want to do this
# separately for each loss function, since they'll probably have different
# means
df = df.reset_index()
df = df.set_index('loss_func')
df[f'{y}_mean_overall'] = y_mean.reset_index().groupby('loss_func')[y].mean()
df[f'demeaned_{y}'] = df[y] - df[f'{y}_mean']
df[f'remeaned_{y}'] = df[f'demeaned_{y}'] + df[f'{y}_mean_overall']
for col in extra_cols:
df[f'demeaned_{col}'] = df[col] - df[f'{y}_mean']
df[f'remeaned_{col}'] = df[f'demeaned_{col}'] + df[f'{y}_mean_overall']
return df.reset_index()
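def _demean_df_example():
    """Toy sketch (not used elsewhere) of what _demean_df adds; the subjects,
    loss-function name, and loss values below are made up."""
    toy = pd.DataFrame({
        'subject': ['sub-01', 'sub-01', 'sub-02', 'sub-02'],
        'loss_func': ['weighted_normed_loss'] * 4,
        'cv_loss': [.10, .14, .30, .34],
    })
    toy = _demean_df(toy)
    # demeaned_cv_loss is [-.02, .02, -.02, .02]: the per-subject offset is gone,
    # while remeaned_cv_loss adds back the grand mean (.22), keeping values positive.
    return toy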
def prep_df(df, task, groupaverage=False):
"""prepare the dataframe by restricting to the appropriate subset
The dataframe created by earlier analysis steps contains all
scanning sessions and potentially multiple visual areas. for our
figures, we just want to grab the relevant scanning sessions and
visual areas (V1), so this function helps do that. If df has the
'frequency_type' column (i.e., it's summarizing the 1d tuning
curves), we also restrict to the "local_sf_magnitude" rows (rather
than "frequency_space")
Parameters
----------
df : pd.DataFrame
dataframe that will be used for plotting figures. contains some
summary of (either 1d or 2d) model information across sessions.
    task : {'task-sfprescaled', 'task-sfpconstant'}
this determines which task we'll grab: task-sfprescaled or
task-sfpconstant. task-sfp is also exists, but we consider that
a pilot task and so do not allow it for the creation of figures
(the stimuli were not contrast-rescaled).
groupaverage : bool, optional
whether to grab only the groupaverage subjects (if True) or
every other subject (if False). Note that we'll grab/drop both
i-linear and i-nearest if they're both present
Returns
-------
df : pd.DataFrame
The restricted dataframe.
"""
if task not in ['task-sfprescaled', 'task-sfpconstant']:
raise Exception("Only task-sfprescaled and task-sfpconstant are allowed!")
df = df.query("task==@task")
if 'frequency_type' in df.columns:
df = df.query("frequency_type=='local_sf_magnitude'")
if 'varea' in df.columns:
df = df.query("varea==1")
if 'fit_model_type' in df.columns:
df.fit_model_type = df.fit_model_type.map(dict(zip(plotting.MODEL_ORDER,
plotting.MODEL_PLOT_ORDER)))
if 'subject' in df.columns:
df.subject = df.subject.map(dict(zip(plotting.SUBJECT_ORDER,
plotting.SUBJECT_PLOT_ORDER)))
return df
def prep_model_df(df):
"""prepare models df for plotting
For plotting purposes, we want to rename the model parameters from
their original values (e.g., sf_ecc_slope, abs_mode_cardinals) to
those we use in the equation (e.g., a, p_1). We do that by simply
remapping the names from those given at plotting.ORIG_PARAM_ORDER to
those in plotting.PLOT_PARAM_ORDER. we additionally add a new
column, param_category, which we use to separate out the three types
of parameters: sigma, the effect of eccentricity, and the effect of
orientation / retinal angle.
Parameters
----------
df : pd.DataFrame
models dataframe, that is, the dataframe that summarizes the
parameter values for a variety of models
Returns
-------
df : pd.DataFrame
The remapped dataframe.
"""
rename_params = dict((k, v) for k, v in zip(plotting.ORIG_PARAM_ORDER,
plotting.PLOT_PARAM_ORDER))
df = df.set_index('model_parameter')
df.loc['sigma', 'param_category'] = 'sigma'
df.loc[['sf_ecc_slope', 'sf_ecc_intercept'], 'param_category'] = 'eccen'
df.loc[['abs_mode_cardinals', 'abs_mode_obliques', 'rel_mode_cardinals', 'rel_mode_obliques',
'abs_amplitude_cardinals', 'abs_amplitude_obliques', 'rel_amplitude_cardinals',
'rel_amplitude_obliques'], 'param_category'] = 'orientation'
df = df.reset_index()
df['model_parameter'] = df.model_parameter.map(rename_params)
return df
def append_precision_col(df, col='preferred_period',
gb_cols=['subject', 'session', 'varea', 'stimulus_superclass', 'eccen']):
"""append column giving precision of another column and collapse
this function gives the precision of the value found in a single
column (across the columns that are NOT grouped-by) and collapses
across those columns. The intended use case is to determine the
precision of a parameter estimate across bootstraps for each
(subject, session) (for the 2d model) or for each (subject, session,
stimulus_superclass, eccen) (for the 1d model).
precision is the inverse of the variance, so let :math:`c` be the
68% confidence interval of the column value, then precision is
:math:`\frac{1}{(c/2)^2}`
finally, we collapse across gb_cols, returning the median and
precision of col for each combination of values from those columns.
Parameters
----------
df : pd.DataFrame
the df that contains the values we want the precision for
col : str, optional
the name of the column that contains the values we want the
precision for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the precision separately for each combination of values
here.
Returns
-------
df : pd.DataFrame
the modified df, containing the median and precision of col
(also contains the medians of the other values in the original
df, but not their precision)
"""
gb = df.groupby(gb_cols)
df = df.set_index(gb_cols)
df[f'{col}_precision'] = gb[col].apply(first_level_analysis._precision_dist)
df = df.reset_index()
return df.groupby(gb_cols).median().reset_index()
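def _precision_from_bootstraps_example(bootstrapped_values):
    """Illustrative sketch of the precision definition used above (the actual
    computation lives in first_level_analysis._precision_dist): take the 68% CI
    of the bootstrapped values and return 1 / (half-width)^2."""
    lo, hi = np.percentile(bootstrapped_values, [16, 84])
    half_width = (hi - lo) / 2
    return 1 / half_width ** 2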
def precision_weighted_bootstrap(df, seed, n_bootstraps=100, col='preferred_period',
gb_cols=['varea', 'stimulus_superclass', 'eccen'],
precision_col='preferred_period_precision'):
"""calculate the precision-weighted bootstrap of a column
to combine across subjects, we want to use a precision-weighted
average, rather than a regular average, because we are trying to
summarize the true value across the population and our uncertainty
in it. Therefore, we down-weight subjects whose estimate is
noisier. Similar to append_precision_col(), we groupby over some of
the columns to combine info across them (gb_cols here should be a
subset of those used for append_precision_col())
You should plot the values here with scatter_ci_dist() or something
similar to draw the 68% CI of the distribution here (not sample it
to draw the CI)
Parameters
----------
df : pd.DataFrame
the df that we want to bootstrap (must already have precision
column, i.e., this should be the df returned by
append_precision_col())
seed : int
seed for numpy's RNG
n_bootstraps : int, optional
the number of independent bootstraps to draw
col : str, optional
the name of the column that contains the values we want to draw
bootstraps for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the bootstraps for each combination of values here.
precision_col : str, optional
name of the column that contains the precision, used in the
precision-weighted mean
Returns
-------
df : pd.DataFrame
the df containing the bootstraps of precision-weighted
mean. this will only contain the following columns: col,
*gb_cols, and bootstrap_num
"""
np.random.seed(seed)
if type(gb_cols) != list:
raise Exception("gb_cols must be a list!")
bootstraps = []
for n, g in df.groupby(gb_cols):
# n needs to be a list of the same length as gb_cols for the
# dict(zip()) call to work, but if len(gb_cols) == 1, then it
# will be a single str (or int or float or whatever), so we
# convert it to a list real quick
if len(gb_cols) == 1:
n = [n]
tmp = dict(zip(gb_cols, n))
for j in range(n_bootstraps):
t = g.sample(len(g), replace=True)
tmp[col] = np.average(t[col], weights=t[precision_col])
tmp['bootstrap_num'] = j
bootstraps.append(pd.DataFrame(tmp, [0]))
bootstraps = pd.concat(bootstraps).reset_index(drop=True)
if 'subject' in df.columns and 'subject' not in gb_cols:
bootstraps['subject'] = 'all'
return bootstraps
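def _precision_weighted_mean_example():
    """Toy sketch of the weighting inside precision_weighted_bootstrap: a noisy
    (low-precision) subject pulls the combined estimate much less than a precise
    one. The numbers are made up."""
    periods = np.array([1.0, 2.0])
    precisions = np.array([100., 1.])
    # roughly 1.01, dominated by the first, far more precise, estimate
    return np.average(periods, weights=precisions)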
def _summarize_1d(df, reference_frame, y, row, col, height, facetgrid_legend,
**kwargs):
"""helper function for pref_period_1d and bandwidth_1d
since they're very similar functions.
"eccen" is always plotted on the x-axis, and hue is always
"stimulus_type" (unless overwritten with kwargs)
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
y : str
which column of the df to plot on the y-axis
reference_frame : {'relative', 'absolute'}
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str
which column of the df to facet the plot's rows on
col : str
which column of the df to facet the plot's column on
height : float
height of each plot facet
kwargs :
all passed to summary_plots.main() (most of these then get
passed to sns.FacetGrid, see the docstring of summary_plots.main
for more info)
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
pal = plotting.stimulus_type_palette(reference_frame)
hue_order = plotting.get_order('stimulus_type', reference_frame)
col_order, row_order = None, None
if col is not None:
col_order = plotting.get_order(col, col_unique=df[col].unique())
if row is not None:
row_order = plotting.get_order(row, col_unique=df[row].unique())
kwargs.setdefault('xlim', (0, 12))
g = summary_plots.main(df, row=row, col=col, y=y, eccen_range=(0, 11),
hue_order=hue_order, height=height,
plot_func=[plotting.plot_median_fit, plotting.plot_median_fit,
plotting.scatter_ci_dist],
# these three end up being kwargs passed to the
# functions above, in order
x_jitter=[None, None, .2],
x_vals=[(0, 10.5), None, None],
linestyle=['--', None, None],
palette=pal, col_order=col_order,
row_order=row_order,
facetgrid_legend=facetgrid_legend, **kwargs)
g.set_xlabels('Eccentricity (deg)')
if facetgrid_legend:
g._legend.set_title("Stimulus class")
return g
def pref_period_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', col_wrap=None, **kwargs):
"""Plot the preferred period of the 1d model fits.
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
the restrictions yourself before passing df to this function
The only difference between this and the bandwidth_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
col : str, optional
which column of the df to facet the plot's column on
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
# if we're wrapping columns, then we need this to take up the full width in
# order for it to be readable
if col_wrap is not None:
fig_width = 'full'
else:
fig_width = 'half'
params, fig_width = style.plotting_style(context, figsize=fig_width)
if col_wrap is not None:
fig_width /= col_wrap
# there is, as of seaborn 0.11.0, a bug that interacts with our xtick
# label size and height (see
# https://github.com/mwaskom/seaborn/issues/2293), which causes an
# issue if col_wrap == 3. this manual setting is about the same size
# and fixes it
if col_wrap == 3:
fig_width = 2.23
elif col is not None:
fig_width /= df[col].nunique()
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
kwargs.setdefault('ylim', (0, 2.1))
else:
kwargs.setdefault('ylim', (0, 4))
facetgrid_legend = True
g = _summarize_1d(df, reference_frame, 'preferred_period', row, col,
fig_width, facetgrid_legend, col_wrap=col_wrap, **kwargs)
g.set_ylabels('Preferred period (deg)')
yticks = [i for i in range(4) if i <= kwargs['ylim'][1]]
g.set(yticks=yticks)
if context != 'paper':
g.fig.suptitle("Preferred period of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
else:
if len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
for ax in g.axes.flatten():
ax.axhline(color='gray', linestyle='--')
ax.axvline(color='gray', linestyle='--')
ax.set(xticks=[0, 2, 4, 6, 8, 10])
g.fig.subplots_adjust(wspace=.05, hspace=.15)
return g
def bandwidth_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', units='octaves', **kwargs):
"""plot the bandwidth of the 1d model fits
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
the restrictions yourself before passing df to this function
The only difference between this and the pref_period_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
units : {'octaves', 'degrees}, optional
Whether to plot this data in octaves (in which case we expect it to be
flat with eccentricity) or degrees (in which case we expect it to scale
with eccentricity)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
col : str, optional
which column of the df to facet the plot's column on
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
else:
facetgrid_legend = True
if units == 'degrees':
if 'tuning_curve_bandwidth_degrees' not in df.columns:
df['tuning_curve_bandwidth_degrees'] = df.apply(utils._octave_to_degrees, 1)
y = 'tuning_curve_bandwidth_degrees'
elif units == 'octaves':
y = 'tuning_curve_bandwidth'
kwargs.setdefault('ylim', (0, 8))
g = _summarize_1d(df, reference_frame, y, row, col,
fig_width, facetgrid_legend, **kwargs)
g.set_ylabels(f'Tuning curve FWHM ({units})')
if context != 'paper':
g.fig.suptitle("Full-Width Half-Max of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
elif len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
return g
def existing_studies_figure(df, y="Preferred period (deg)", legend=True, context='paper'):
"""Plot the results from existing studies
See the docstring for figures.existing_studies_df() for more
details on the information displayed in this figure.
Parameters
----------
df : pd.DataFrame
The existing studies df, as returned by the function
figures.existing_studies_df().
y : {'Preferred period (deg)', 'Preferred spatial frequency (cpd)'}
Whether to plot the preferred period or preferred spatial
frequency on the y-axis. If preferred period, the y-axis is
linear; if preferred SF, the y-axis is log-scaled (base 2). The
ylims will also differ between these two
legend : bool, optional
Whether to add a legend or not
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
The FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
fig_height = fig_width / 1.2
pal = sns.color_palette('Set2', df.Paper.nunique())
pal = dict(zip(df.Paper.unique(), pal))
if 'Current study' in df.Paper.unique():
pal['Current study'] = (0, 0, 0)
g = sns.FacetGrid(df, hue='Paper', height=fig_height, aspect=1.2, palette=pal)
if y == "Preferred period (deg)":
g.map(plt.plot, 'Eccentricity', y, marker='o')
g.ax.set_ylim((0, 6))
elif y == "Preferred spatial frequency (cpd)":
g.map(plt.semilogy, 'Eccentricity', y, marker='o', basey=2)
g.ax.set_ylim((0, 11))
g.ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(plotting.myLogFormat))
g.ax.set_xlim((0, 20))
if context == 'poster':
g.ax.set(xticks=[0, 5, 10, 15, 20])
g.ax.set_title("Summary of human V1 fMRI results")
if legend:
g.add_legend()
# facetgrid doesn't let us set the title fontsize directly, so need to do
# this hacky work-around
g.fig.legends[0].get_title().set_size(mpl.rcParams['legend.title_fontsize'])
g.ax.set_xlabel('Eccentricity of receptive field center (deg)')
return g
def input_schematic(context='paper', prf_loc=(250, 250), prf_radius=100,
stim_freq=(.01, .03)):
"""Schematic to explain 2d model inputs.
This schematic explains the various inputs of our 2d model:
eccentricity, retinotopic angle, spatial frequency, and
orientation. It does this with a little diagram of a pRF with a
local stimulus, with arrows and labels.
The location and size of the pRF, as well as the frequency of the
stimulus, are all modifiable, and the labels and arrows will update
themselves. The arrows should behave appropriately, but it's hard to
guarantee that the labels will always look good (their positioning
is relative, so it will at least be close). You are restricted to
placing the pRF inside the first quadrant, which helps make the
possibilities more reasonable.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
prf_loc : tuple, optional
2-tuple of floats, location of the prf. Both numbers must lie
between 0 and 500 (i.e., we require this to be in the first
quadrant). Max value on both x and y axes is 500.
prf_radius : float, optional
radius of the prf, in pixels. the local stimulus will have half
this radius
stim_freq : tuple, optional
2-tuple of floats, the (x_freq, y_freq) of the stimulus, in
cycles per pixel
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width)
fig, ax = plt.subplots(1, 1, figsize=figsize)
def get_xy(distance, angle, origin=(500, 500)):
return [o + distance * func(angle) for o, func in
zip(origin, [np.cos, np.sin])]
pal = sns.color_palette('deep', 2)
if (np.array(prf_loc) > 500).any() or (np.array(prf_loc) < 0).any():
raise Exception("the coordinates of prf_loc must be between 0 and 500, but got "
f"value {prf_loc}!")
# prf_loc is in coordinates relative to the center, so we convert that here
abs_prf_loc = [500 + i for i in prf_loc]
mask = utils.create_circle_mask(*abs_prf_loc, prf_radius/2, 1001)
mask[mask==0] = np.nan
stim = mask * utils.create_sin_cpp(1001, *stim_freq)
plotting.im_plot(stim, ax=ax, origin='lower')
ax.axhline(500, c='.5')
ax.axvline(500, c='.5')
ax.set(xlim=(450, 1001), ylim=(450, 1001))
for s in ax.spines.keys():
ax.spines[s].set_visible(False)
prf = mpl.patches.Circle(abs_prf_loc, prf_radius, fc='none', ec='k', linewidth=2,
linestyle='--', zorder=10)
ax.add_artist(prf)
prf_ecc = np.sqrt(np.square(prf_loc).sum())
prf_angle = np.arctan2(*prf_loc[::-1])
e_loc = get_xy(prf_ecc/2, prf_angle + np.pi/13)
plotting.draw_arrow(ax, (500, 500), abs_prf_loc, arrowprops={'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[1]})
ax.text(*e_loc, r'$r_v$')
ax.text(600, 500 + 100*np.sin(prf_angle/2), r'$\theta_v$')
angle = mpl.patches.Arc((500, 500), 200, 200, 0, 0, np.rad2deg(prf_angle),
fc='none', ec=pal[1], linestyle='-')
ax.add_artist(angle)
# so that this is the normal vector, the 7000 is just an arbitrary
# scale factor to make the vector a reasonable length
normal_len = 7000 * np.sqrt(np.square(stim_freq).sum())
normal_angle = np.arctan2(*stim_freq[::-1])
omega_loc = get_xy(normal_len, normal_angle, abs_prf_loc)
plotting.draw_arrow(ax, abs_prf_loc, omega_loc, r'$\omega_l$', {'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[0]})
angle = mpl.patches.Arc(abs_prf_loc, 1.2*normal_len, 1.2*normal_len, 0, 0,
# small adjustment appears to be necessary for some
# reason -- but really only for some spatial
# frequencies.
np.rad2deg(normal_angle)-3,
fc='none', ec=pal[0], linestyle='-')
ax.add_artist(angle)
plotting.draw_arrow(ax, (abs_prf_loc[0] + normal_len, abs_prf_loc[1]), abs_prf_loc,
arrowprops={'connectionstyle': 'angle3', 'arrowstyle': '-', 'color': '.5',
'linestyle': ':'})
theta_loc = get_xy(1.3*normal_len/2, normal_angle/2, abs_prf_loc)
ax.text(*theta_loc, r'$\theta_l$')
return fig
def model_schematic(context='paper'):
"""Create model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
This creates only the polar plots (showing the preferred period contours),
and doesn't have a legend; it's intended that you call
compose_figures.add_legend to add the graphical one (and a space has been
left for it)
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width/3)
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
elif context == 'poster':
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig, axes = plt.subplots(1, 3, figsize=figsize,
subplot_kw={'projection': 'polar'})
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$',
# can't have a newline in a raw string, so have to combine them
# in the last label here
r'$p_1=p_3>$'+'\n'+r'$p_2=p_4>0$']
for i, (m, ax) in enumerate(zip([abs_model, rel_model, full_model], axes)):
plotting.model_schematic(m, [ax], [(-.1, 3)], False,
orientation=orientation)
if i != 0:
ax.set(ylabel='')
if i != 1:
ax.set(xlabel='')
else:
# want to move this closer
ax.set_xlabel(ax.get_xlabel(), labelpad=-10)
ax.set_title(labels[i])
ax.set(xticklabels=[], yticklabels=[])
fig.subplots_adjust(wspace=.075)
return fig
def model_schematic_large(context='paper'):
"""Create larger version of model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
Note that this includes both linear and polar plots, and will probably be
way too large
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
size_scale = 1
elif context == 'poster':
size_scale = 1.5
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig = plt.figure(figsize=(size_scale*15, size_scale*15))
gs = mpl.gridspec.GridSpec(figure=fig, ncols=3, nrows=3)
projs = ['rectilinear', 'polar']
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$', r'$p_1=p_3>p_2=p_4>0$']
axes = []
for i, m in enumerate([abs_model, rel_model, full_model]):
model_axes = [fig.add_subplot(gs[i, j], projection=projs[j]) for j in range(2)]
if i == 0:
title = True
else:
title = False
model_axes = plotting.model_schematic(m, model_axes[:2], [(-.1, 4.2), (-.1, 3)], title,
orientation=orientation)
if i != 2:
[ax.set(xlabel='') for ax in model_axes]
model_axes[0].text(size_scale*-.25, .5, labels[i], rotation=90,
transform=model_axes[0].transAxes, va='center',
fontsize=1.5*mpl.rcParams['font.size'])
axes.append(model_axes)
# this needs to be created after the model plots so we can grab
# their axes
legend_axis = fig.add_subplot(gs[1, -1])
legend_axis.legend(*axes[1][1].get_legend_handles_labels(), loc='center left')
legend_axis.axis('off')
return fig
def _catplot(df, x='subject', y='cv_loss', hue='fit_model_type', height=8, aspect=.9,
ci=68, plot_kind='strip', x_rotate=False, legend='full', orient='v', **kwargs):
"""wrapper around seaborn.catplot
several figures call seaborn.catplot and are pretty similar, so this
function bundles a bunch of the stuff we do:
1. determine the proper order for hue and x
2. determine the proper palette for hue
3. always use np.median as estimator and 'full' legend
4. optionally rotate x-axis labels (and add extra room if so)
5. add a horizontal line at the x-axis if we have both negative and
positive values
Parameters
----------
df : pd.DataFrame
pandas DataFrame
x : str, optional
which column of the df to plot on the x-axis
y : str, optional
which column of the df to plot on the y-axis
hue : str, optional
which column of the df to facet as the hue
height : float, optional
height of each plot facet
aspect : float, optional
aspect ratio of each facet
ci : int, optional
size of the confidence intervals (ignored if plot_kind=='strip')
plot_kind : {'point', 'bar', 'strip', 'swarm', 'box', 'violin', or 'boxen'}, optional
type of plot to make, i.e., sns.catplot's kind argument. see
that functions docstring for more details. only 'point' and
'strip' are expected, might do strange things otherwise
x_rotate : bool or int, optional
whether to rotate the x-axis labels or not. if True, we rotate
by 25 degrees. if an int, we rotate by that many degrees. if
False, we don't rotate. If labels are rotated, we'll also shift
the bottom of the plot up to avoid cutting off the bottom.
legend : str or bool, optional
the legend arg to pass through to seaborn.catplot, see its
docstrings for more details
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
kwargs :
passed to sns.catplot
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
if 'order' in kwargs.keys():
order = kwargs.pop('order')
else:
order = plotting.get_order(x, col_unique=df[x].unique())
pal = plotting.get_palette(hue, col_unique=df[hue].unique(),
doubleup='doubleup' in x)
if plot_kind == 'strip':
# want the different hues to be in a consistent order on the
# x-axis, which requires this
kwargs.update({'jitter': False, 'dodge': True})
if orient == 'h':
x_copy = x
x = y
y = x_copy
aspect = 1/aspect
kwargs['sharex'] = False
else:
kwargs['sharey'] = False
if 'dodge' not in kwargs.keys():
kwargs['dodge'] = 0
# facetgrid seems to ignore the defaults for these, but we want to use them
# so its consistent with other figures
gridspec_kws = {k: mpl.rcParams[f'figure.subplot.{k}']
for k in ['top', 'bottom', 'left', 'right']}
g = sns.catplot(x, y, hue, data=df, hue_order=hue_order, legend=legend, height=height,
kind=plot_kind, aspect=aspect, order=order, palette=pal, ci=ci,
estimator=np.median, orient=orient, facet_kws={'gridspec_kws': gridspec_kws},
**kwargs)
for ax in g.axes.flatten():
if x_rotate:
if x_rotate is True:
x_rotate = 25
labels = ax.get_xticklabels()
if labels:
ax.set_xticklabels(labels, rotation=x_rotate, ha='right')
if orient == 'v':
if (df[y] < 0).any() and (df[y] > 0).any():
ax.axhline(color='grey', linestyle='dashed')
else:
if (df[x] < 0).any() and (df[x] > 0).any():
ax.axvline(color='grey', linestyle='dashed')
if x_rotate:
if x == 'subject':
g.fig.subplots_adjust(bottom=.15)
else:
g.fig.subplots_adjust(bottom=.2)
return g
def cross_validation_raw(df, seed, noise_ceiling_df=None, orient='v', context='paper'):
"""plot raw cross-validation loss
This does no pre-processing of the df and plots subjects on the
x-axis, model type as hue. (NOTE: this means if there are multiple
scanning sessions for each subject, the plot will combine them,
which is probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
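    Examples
    --------
    A minimal sketch (not a tested doctest); ``df`` stands in for the output
    of the combine_model_cv_summaries snakemake rule:
        g = cross_validation_raw(df, seed=0)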
"""
np.random.seed(seed)
height = 8
aspect = .9
s = 5
if context == 'poster':
height *= 2
aspect = 1
s *= 2
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
df = pd.merge(df, noise_ceiling_df, 'outer', on=merge_cols, suffixes=['_cv', '_noise'])
g = _catplot(df.query('loss_func in ["weighted_normed_loss", "normed_loss", "cosine_distance_scaled"]'),
legend=False, height=height, s=s, x_rotate=True, orient=orient,
                 aspect=aspect, col='loss_func')
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'subject', 'loss')
g.fig.suptitle("Cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel="Cross-validated loss", xlabel="Subject")
elif orient == 'h':
g.set(xlabel="Cross-validated loss", ylabel="Subject")
g.add_legend()
g._legend.set_title("Model type")
ylims = [(0, .06), (0, .0022), (0, .0022)]
for i, ax in enumerate(g.axes.flatten()):
ax.set(ylim=ylims[i])
return g
def cross_validation_demeaned(df, seed, remeaned=False, orient='v', context='paper'):
"""plot demeaned cross-validation loss
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots subjects on the x-axis, model
type as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points will all be the same.
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
np.random.seed(seed)
height = 8
aspect = .9
if context == 'poster':
height *= 2
aspect = 1
df = _demean_df(df)
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
g = _catplot(df, y=f'{name}_cv_loss', height=height, aspect=aspect, x_rotate=True,
orient=orient, col='loss_func')
g.fig.suptitle(f"{name.capitalize()} cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Subject")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="Subject")
g._legend.set_title("Model type")
return g
def cross_validation_model(df, seed, plot_kind='strip', remeaned=False, noise_ceiling_df=None,
orient='v', sort=False, doubleup=False, context='paper'):
"""plot demeaned cross-validation loss, as function of model type
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots model type on the x-axis,
subject as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
plot_kind : {'strip', 'point'}, optional
whether to create a strip plot (each subject as a separate
point) or a point plot (combine across subjects, plotting the
median and bootstrapped 68% CI)
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points (and the size of the error bars if
`plot_kind='point'`) will all be the same.
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
sort : bool, optional
whether to sort the models by the median loss of the
weighted_normed_loss or show them in numbered order
doubleup : bool, optional
whether to "double-up" models so that we plot two models on the same
row if they're identical except for fitting A3/A4. this then shows the
version fitting A3/A4 as a fainter color of the version that doesn't.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
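    Examples
    --------
    A minimal sketch (not a tested doctest), using the same ``df`` as
    cross_validation_raw; plot_kind='point' collapses across subjects:
        g = cross_validation_model(df, seed=0, plot_kind='point', sort=True)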
"""
kwargs = {}
np.random.seed(seed)
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if doubleup:
height = fig_width * .855
else:
height = fig_width
aspect = 1
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
noise_ceiling_df = noise_ceiling_df.groupby(merge_cols).median().reset_index()
df = pd.merge(df, noise_ceiling_df, 'inner', on=merge_cols, suffixes=['_cv', '_noise'])
extra_cols = ['loss']
else:
extra_cols = []
df = _demean_df(df, extra_cols=extra_cols)
if plot_kind == 'strip':
hue = 'subject'
legend_title = "Subject"
legend = 'full'
elif plot_kind == 'point':
hue = 'fit_model_type'
legend = False
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
if sort:
gb = df.query("loss_func == 'weighted_normed_loss'").groupby('fit_model_type')
kwargs['order'] = gb[f'{name}_cv_loss'].median().sort_values(ascending=False).index
if doubleup:
df['fit_model_doubleup'] = df.fit_model_type.map(dict(zip(plotting.MODEL_PLOT_ORDER,
plotting.MODEL_PLOT_ORDER_DOUBLEUP)))
x = 'fit_model_doubleup'
if noise_ceiling_df is not None:
nc_map = {k: k for k in range(1, 8)}
nc_map.update({10: 8, 12: 9})
df['fit_model_nc'] = df.fit_model_doubleup.map(nc_map)
else:
x = 'fit_model_type'
if noise_ceiling_df is not None:
df['fit_model_nc'] = df.fit_model_type
g = _catplot(df, x=x, y=f'{name}_cv_loss', hue=hue,
col='loss_func', plot_kind=plot_kind, height=height,
aspect=aspect, orient=orient, legend=legend, **kwargs)
title = f"{name.capitalize()} cross-validated loss across model types"
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'fit_model_nc', f'{name}_loss', ci=0,
orient=orient)
title += "\n Median noise ceiling shown as blue line"
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Model type")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="")
# if plot_kind=='point', then there is no legend, so the following
# would cause an error
if plot_kind == 'strip':
g._legend.set_title(legend_title)
# don't want title in the paper version
if context != 'paper':
g.fig.suptitle(title)
else:
if orient == 'h':
# also want to remove the y axis, since it's duplicating the one from
# the other figure
for ax in g.axes.flatten():
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
if plot_kind == 'point':
# this way, the ylims line up whether or not we plotted the
# noise ceiling line
if doubleup:
ax.set_ylim((8.5, -0.5))
else:
ax.set_ylim((13.5, -0.5))
return g
def model_types(context='paper', palette_type='model', annotate=False,
order=None, doubleup=False):
"""Create plot showing which model fits which parameters.
We have 11 different parameters, which might seem like a lot, so we
do cross-validation to determine whether they're all necessary. This
plot shows which parameters are fit by each model, in a little
table.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
palette_type : {'model', 'simple', 'simple_r', seaborn palette name}, optional
palette to use for this plot. if 'model', the parameter each
model fits is shown in its color (as used in other plots). If
'simple' or 'simple_r', we'll use a white/black colormap with
either black (if 'simple') or white (if 'simple_r') showing the
parameter is fit. Else, should be a str giving a seaborn palette
name, i.e., an arg that can be passed to seaborn.color_palette.
annotate : bool, optional
whether to annotate the schematic with info on the parameter
categories (e.g., period/amplitude, eccentricity/orientation,
etc)
order : pandas index or None, optional
If None, we plot the models in the default order. Else, should be an
        index object that gives the order to plot them in (from top to bottom).
    doubleup : bool, optional
        whether to "double-up" models that differ only in fitting A3/A4 onto
        a single row (as in cross_validation_model)
Returns
-------
fig : plt.Figure
The figure with the plot on it
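    Examples
    --------
    This figure is built entirely from the constants in the plotting module,
    so it needs no dataframe (sketch, not a tested doctest):
        fig = model_types('paper', annotate=True)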
"""
params, fig_width = style.plotting_style(context, figsize='half')
# these ticks don't add anything and are confusing
params['xtick.bottom'] = False
params['ytick.left'] = False
plt.style.use(params)
figsize = (fig_width, fig_width)
extra_space = 0
model_names = plotting.MODEL_PLOT_ORDER
parameters = plotting.PLOT_PARAM_ORDER
model_variants = np.zeros((len(model_names), len(parameters)))
if palette_type == 'model':
pal = plotting.get_palette('fit_model_type', col_unique=model_names,
doubleup=doubleup)
try:
pal = pal.tolist()
except AttributeError:
# then it's already a list
pass
pal = [(1, 1, 1)] + pal
fill_vals = dict(zip(range(len(model_names)), range(1, len(model_names)+1)))
else:
if palette_type.startswith('simple'):
black, white = [(0, 0, 0), (1, 1, 1)]
if palette_type.endswith('_r'):
pal = [black, white]
else:
pal = [white, black]
else:
pal = sns.color_palette(palette_type, 2)
fill_vals = dict(zip(range(len(model_names)), len(model_names) * [True]))
if not doubleup:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[7, [0, 1, 2, 9, 10]] = fill_vals[7]
model_variants[8, [0, 1, 2, 7, 8, 9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[10, [0, 1, 2, 5, 6, 9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[12, [0, 1, 2, 3, 4, 5, 6, 9, 10]] = fill_vals[12]
model_variants[13, :] = fill_vals[13]
        # while in theory we want square to be True here too, we've adjusted
        # all the sizes in such a way that it works with it set to False
square = False
else:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[2, [9, 10]] = fill_vals[7]
model_variants[6, [9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[4, [9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[5, [9, 10]] = fill_vals[12]
model_variants[11, [9, 10]] = fill_vals[13]
# drop the rows that are all 0s
model_variants = model_variants[~(model_variants==0).all(1)]
warnings.warn("when doubling-up, we just use sequential numbers for models "
"(the numbers therefore have a different meaning than for "
"non-doubled-up version)")
model_names = np.arange(1, model_variants.shape[0]+1)
square = True
model_variants = pd.DataFrame(model_variants, model_names, parameters)
if order is not None:
model_variants = model_variants.reindex(order)
fig = plt.figure(figsize=figsize)
ax = sns.heatmap(model_variants, cmap=pal, cbar=False, square=square)
ax.set_yticklabels(model_variants.index, rotation=0)
ax.set_ylabel("Model type")
# we want the labels on the top here, not the bottom
ax.tick_params(labelbottom=False, labeltop=True, pad=-2)
if annotate:
arrowprops = {'connectionstyle': 'bar', 'arrowstyle': '-', 'color': '0'}
        # abbreviated labels (Eccentricity, Absolute, Relative) so they fit
        # above the heatmap
        text = ['Ecc', 'Abs', 'Rel', 'Abs', 'Rel']
for i, pos in enumerate(range(1, 10, 2)):
plotting.draw_arrow(ax, ((pos+.5)/11, 1.08+extra_space),
((pos+1.5)/11, 1.08+extra_space), arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text((pos+1)/11, 1.11+extra_space, text[i], transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/5}'
plotting.draw_arrow(ax, (1.5/11, 1.17+extra_space), (6.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(4/11, 1.22+extra_space, 'Period', transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/3}'
plotting.draw_arrow(ax, (7.5/11, 1.17+extra_space), (10.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(9/11, 1.22+extra_space, 'Amplitude', transform=ax.transAxes,
ha='center', va='bottom')
return fig
def model_parameters(df, plot_kind='point', visual_field='all', fig=None, add_legend=True,
context='paper', **kwargs):
"""plot model parameter values, across subjects
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot and adds
a column, param_category, which enables us to break up the
figure into three subplots
plot_kind : {'point', 'strip', 'dist'}, optional
What type of plot to make. If 'point' or 'strip', it's assumed
that df contains only the fits to the median data across
bootstraps (thus, one value per subject per parameter); if
'dist', it's assumed that df contains the fits to all bootstraps
(thus, 100 values per subject per parameter). this function
should run if those are not true, but it will look weird:
- 'point': point plot, so show 68% CI across subjects
- 'strip': strip plot, so show each subject as a separate point
        - 'dist': distribution, show each subject as a separate
point with their own 68% CI across bootstraps
visual_field : str, optional
in addition to fitting the model across the whole visual field,
we also fit the model to some portions of it (the left half,
right half, etc). this arg allows us to easily modify the title
of the plot to make it clear which portion of the visual field
we're plotting. If 'all' (the default), we don't modify the
title at all, otherwise we append "in {visual_field} visual
field" to it.
fig : plt.Figure or None, optional
the figure to plot on. If None, we create a new figure. Intended
use case for this is to plot the data from multiple sessions on
the same axes (with different display kwargs), in order to
directly compare how parameter values change.
add_legend : bool, optional
whether to add a legend or not. If True, will add just outside
the right-most axis
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
kwargs :
Passed directly to the plotting function, which depends on the
value of plot_kind
Returns
-------
fig : plt.Figure
        Figure containing the plot
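    Examples
    --------
    A minimal sketch (not a tested doctest); ``df`` stands in for a parameter
    dataframe that has already been through prep_model_df:
        fig = model_parameters(df, plot_kind='point')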
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
# in order to make the distance between the hues appear roughly
# equivalent, need to set the ax_xlims in a particular way
n_ori_params = df.query("param_category=='orientation'").model_parameter.nunique()
ax_xlims = [[-.5, .5], [-.5, 1.5], [-.5, n_ori_params - .5]]
yticks = [[0, .5, 1, 1.5, 2, 2.5], [0, .1, .2, .3, .4], [-.03, 0, .03, .06, .09]]
axhline = [2]
if fig is None:
fig, axes = plt.subplots(1, 3, figsize=(fig_width, fig_width/2),
gridspec_kw={'width_ratios': [.12, .25, .63],
'wspace': .3})
else:
axes = fig.axes
order = plotting.get_order('model_parameter', col_unique=df.model_parameter.unique())
if plot_kind == 'point':
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
elif plot_kind == 'strip':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
hue = 'subject'
# this is sub-groupaverage
else:
hue = 'groupaverage_seed'
pal = plotting.get_palette(hue, col_unique=df[hue].unique(), as_dict=True)
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
elif plot_kind == 'dist':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
hue_order = plotting.get_order('subject', col_unique=df.subject.unique())
gb_col = 'subject'
# copied from how seaborn's stripplot handles this, by looking
# at lines 368 and 1190 in categorical.py (version 0.9.0)
dodge = np.linspace(0, .8 - (.8 / df.subject.nunique()), df.subject.nunique())
dodge -= dodge.mean()
yticks = [[0, .5, 1, 1.5, 2, 2.5, 3.0],
[-.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1],
[-.2, -.1, 0, .1, .2, .3]]
ax_xlims = [[-1, 1], [-1, 2], [-.75, n_ori_params-.5]]
axhline += [1]
# else we've combined across all subjects
else:
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
gb_col = 'model_parameter'
dodge = np.zeros(df.model_parameter.nunique())
for i, ax in enumerate(axes):
cat = ['sigma', 'eccen', 'orientation'][i]
tmp = df.query("param_category==@cat")
ax_order = [i for i in order if i in tmp.model_parameter.unique()]
if plot_kind == 'point':
sns.pointplot('model_parameter', 'fit_value', 'model_parameter', data=tmp,
estimator=np.median, ax=ax, order=ax_order, palette=pal, ci=68, **kwargs)
elif plot_kind == 'strip':
# want to make sure that the different hues end up in the
            # same order every time, which requires doing this with
# jitter and dodge
sns.stripplot('model_parameter', 'fit_value', hue, data=tmp, ax=ax,
order=ax_order, palette=pal, hue_order=hue_order, jitter=False,
dodge=True, **kwargs)
elif plot_kind == 'dist':
handles, labels = [], []
for j, (n, g) in enumerate(tmp.groupby(gb_col)):
dots, _, _ = plotting.scatter_ci_dist('model_parameter', 'fit_value', data=g,
label=n, ax=ax, color=pal[n],
x_dodge=dodge[j], x_order=ax_order, **kwargs)
handles.append(dots)
labels.append(n)
ax.set(xlim=ax_xlims[i], yticks=yticks[i])
ax.tick_params(pad=0)
if ax.legend_:
ax.legend_.remove()
if i == 2:
if add_legend:
if plot_kind == 'dist':
legend = ax.legend(handles, labels, loc='lower center', ncol=3,
borderaxespad=0, frameon=False,
bbox_to_anchor=(.49, -.3), bbox_transform=fig.transFigure)
else:
legend = ax.legend(loc=(1.01, .3), borderaxespad=0, frameon=False)
# explicitly adding the legend artist allows us to add a
# second legend if we want
ax.add_artist(legend)
if i in axhline:
ax.axhline(color='grey', linestyle='dashed')
if i == 0:
ax.set(ylabel='Parameter value')
fig.text(.5, 0, "Parameter", ha='center')
if context != 'paper':
# don't want title in paper context
suptitle = "Model parameters"
if visual_field != 'all':
suptitle += f' in {visual_field} visual field'
fig.suptitle(suptitle)
fig.subplots_adjust(top=.85)
return fig
def model_parameters_pairplot(df, drop_outlier=False):
"""plot pairwise distribution of model parameters
There's one very obvious outlier (sub-wlsubj007, ses-04, bootstrap
41), where the $a$ parameter (sf_ecc_slope) is less than 0 (other
parameters are also weird). If you want to drop that, set
drop_outlier=True
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
drop_outlier : bool, optional
whether to drop the outlier or not (see above)
Returns
-------
g : sns.PairGrid
the PairGrid containing the plot
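    Examples
    --------
    A minimal sketch (not a tested doctest), where ``df`` contains one row per
    subject, bootstrap, and parameter (after prep_model_df):
        g = model_parameters_pairplot(df, drop_outlier=True)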
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique())
pal = dict(zip(df.subject.unique(), pal))
df = pd.pivot_table(df, index=['subject', 'bootstrap_num'], columns='model_parameter',
values='fit_value').reset_index()
# this is a real outlier: one subject, one bootstrap (see docstring)
if drop_outlier:
df = df[df.get('$a$') > 0]
g = sns.pairplot(df, hue='subject', vars=plotting.PLOT_PARAM_ORDER, palette=pal)
for ax in g.axes.flatten():
ax.axhline(color='grey', linestyle='dashed')
ax.axvline(color='grey', linestyle='dashed')
return g
def model_parameters_compare_plot(df, bootstrap_df):
"""plot comparison of model parameters from bootstrap vs median fits
    we have two different ways of fitting the data: to all of the
    bootstraps or just to the median across bootstraps. if we compare
    the resulting parameter values, they shouldn't be that different;
    this plot makes that comparison.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
bootstrap_df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects and bootstraps. note that this should first have gone
through prep_model_df, which renames the values of the
model_parameter columns so they're more pleasant to look at on
the plot
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
order = plotting.get_order('subject', col_unique=df.subject.unique())
compare_cols = ['model_parameter', 'subject', 'session', 'task']
compare_df = df[compare_cols + ['fit_value']]
tmp = bootstrap_df[compare_cols + ['fit_value']].rename(columns={'fit_value': 'fit_value_bs'})
compare_df = pd.merge(tmp, compare_df, on=compare_cols)
compare_df = compare_df.sort_values(compare_cols)
g = sns.FacetGrid(compare_df, col='model_parameter', hue='subject', col_wrap=4, sharey=False,
aspect=2.5, height=3, col_order=plotting.PLOT_PARAM_ORDER, hue_order=order,
palette=pal)
g.map_dataframe(plotting.scatter_ci_dist, 'subject', 'fit_value_bs')
g.map_dataframe(plt.scatter, 'subject', 'fit_value')
for ax in g.axes.flatten():
ax.set_xticklabels(ax.get_xticklabels(), rotation=25, ha='right')
return g
def training_loss_check(df, hue='test_subset', thresh=.2):
"""check last epoch training loss
in order to check that one of the models didn't get stuck in a local
optimum in, e.g., one of the cross-validation folds or bootstraps,
we here plot the loss for each subject and model, with median and
68% CI across batches. they should hopefully look basically all the
same
Parameters
----------
df : pd.DataFrame
dataframe with the last epoch loss, as created by
`analyze_model.collect_final_loss`
hue : str, optional
which df column to use as the hue arg for the FacetGrid
thresh : float, optional
the loss threshold for getting stuck in local optima. we
annotate the plot with any training sessions whose median
training loss on the last epoch is above this value
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
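    Examples
    --------
    A minimal sketch (not a tested doctest); ``df`` stands in for the output
    of analyze_model.collect_final_loss:
        g = training_loss_check(df, thresh=.2)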
"""
# to make sure we show the full dataframe below, from
# https://stackoverflow.com/a/42293737
pd.set_option('display.max_columns', None)
# from https://stackoverflow.com/a/25352191
pd.set_option('display.max_colwidth', -1)
df.fit_model_type = df.fit_model_type.map(dict(zip(plotting.MODEL_ORDER,
plotting.MODEL_PLOT_ORDER_FULL)))
order = plotting.get_order('fit_model_type', col_unique=df.fit_model_type.unique())
col_order = plotting.get_order('subject', col_unique=df.subject.unique())
g = sns.FacetGrid(df, col='subject', hue=hue, col_wrap=4, sharey=False,
aspect=2.5, height=3, col_order=col_order)
g.map_dataframe(plotting.scatter_ci_dist, 'fit_model_type', 'loss', x_jitter=True,
x_order=order)
for ax in g.axes.flatten():
ax.set_xticklabels(ax.get_xticklabels(), rotation=25, ha='right')
if ax.get_ylim()[1] > thresh:
ax.hlines(thresh, 0, len(df.fit_model_type.unique())-1, 'gray', 'dashed')
# find those training sessions with loss above the threshold
above_thresh = df.groupby(['subject', 'fit_model_type', hue]).loss.median()
above_thresh = above_thresh.reset_index().query('loss > @thresh')
if len(above_thresh) > 0:
g.fig.text(1.01, .5, ("Probable local optima (median last epoch training loss > "
f"{thresh}):\n" + str(above_thresh)))
g.fig.suptitle("Last epoch median training loss (with 68% CI across batches) on each CV fold")
g.fig.subplots_adjust(top=.92)
return g
def feature_df_plot(df, avg_across_retinal_angle=False, reference_frame='relative',
feature_type='pref-period', visual_field='all', context='paper',
col_wrap=None, scatter_ref_pts=False, **kwargs):
"""plot model predictions based on parameter values
This function is used to create plots showing the preferred period
as a function of eccentricity, as given by the model. Right now, it
always plots each subject separately, and will plot confidence
intervals based on bootstraps if possible (i.e., if df contains the
column 'bootstrap_num'). You can optionally average over the
retinotopic angles or keep them separate, and you can plot the
predictions for stimuli in the relative or absolute reference frame.
    This function converts the model parameter value df into the
feature_df by calling analyze_model.create_feature_df.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects.
avg_across_retinal_angle : bool, optional
whether to average across the different retinotopic angles
(True) or plot each of them on separate subplots (False). only
relevant if feature_type=='pref-period' (others all plot
something as function of retinotopic angle on polar plots)
reference_frame : {'relative', 'absolute'}, optional
        whether you want to plot the predictions for stimuli in the
relative or absolute reference frame (i.e., annuli and pinwheels
or constant gratings).
feature_type : {'pref-period', 'pref-period-contour', 'iso-pref-period', 'max-amp'}
what type of feature to create the plot for:
- pref-period: plot preferred period as a function of
eccentricity (on a Cartesian plot)
- pref-period-contour: plot preferred period as a function of
retinotopic angle at several different eccentricities (on a
polar plot)
- iso-pref-period: plot iso-preferred period lines as a function
of retinotopic angle, for several different preferred periods
(on a polar plot)
- max-amp: plot max amplitude as a function of retinotopic angle
(on a polar plot)
visual_field : str, optional
in addition to fitting the model across the whole visual field,
we also fit the model to some portions of it (the left half,
right half, etc). this arg allows us to easily modify the title
of the plot to make it clear which portion of the visual field
we're plotting. If 'all' (the default), we don't modify the
title at all, otherwise we append "in {visual_field} visual
field" to it.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
col_wrap : int or None, optional
col_wrap argument to pass through to seaborn FacetGrid
scatter_ref_pts : bool, optional
if True, we plot black points every 45 degrees on the polar plots to
serve as a reference (only used in paper context). if False, do
nothing.
kwargs :
Passed to plotting.feature_df_plot
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
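    Examples
    --------
    A minimal sketch (not a tested doctest), plotting preferred period vs
    eccentricity, averaged over retinotopic angle:
        g = feature_df_plot(df, avg_across_retinal_angle=True,
                            feature_type='pref-period')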
"""
aspect = 1
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
kwargs.setdefault('top', .9)
axes_titles = True
title_kwargs = {}
adjust_kwargs = {}
if df.bootstrap_num.nunique() > 1 or 'groupaverage_seed' in df.columns:
# then we have each subject's bootstraps or the groupaverage
# subject (which has also already been bootstrapped), so we use
# scatter_ci_dist to plot across them
plot_func = plotting.scatter_ci_dist
kwargs.update({'draw_ctr_pts': False, 'ci_mode': 'fill', 'join': True})
else:
plot_func = sns.lineplot
# in this case, we have the individual fits
if 'groupaverage_seed' not in df.columns:
gb_cols = ['subject', 'bootstrap_num']
col = 'subject'
pre_boot_gb_cols = ['subject', 'reference_frame', 'Stimulus type', 'bootstrap_num',
'Eccentricity (deg)']
# in this case, we have the sub-groupaverage
else:
gb_cols = ['groupaverage_seed']
col = None
pre_boot_gb_cols = ['reference_frame', 'Stimulus type', 'groupaverage_seed',
'Eccentricity (deg)']
# if we're faceting over something, need to separate it out when creating
# the feature df
if 'hue' in kwargs.keys():
gb_cols += [kwargs['hue']]
pre_boot_gb_cols += [kwargs['hue']]
if col is None or df.subject.nunique() == 1:
facetgrid_legend = False
suptitle = False
axes_titles = False
split_oris = True
col = 'orientation_type'
ori_map = {k: ['cardinals', 'obliques'][i%2] for i, k in
enumerate(np.linspace(0, np.pi, 4, endpoint=False))}
pre_boot_gb_cols += [col]
if feature_type == 'pref-period':
kwargs.setdefault('height', (fig_width/2) / aspect)
else:
# the polar plots have two subplots, so they're half the height of the
# pref-period one in order to get the same width
kwargs.setdefault('height', (fig_width/4) / aspect)
else:
if context != 'paper':
facetgrid_legend = True
suptitle = True
else:
facetgrid_legend = False
suptitle = False
split_oris = False
if col_wrap is not None:
# there is, as of seaborn 0.11.0, a bug that interacts with our
# xtick label size and height (see
# https://github.com/mwaskom/seaborn/issues/2293), which causes an
# issue if col_wrap == 3. this manual setting is about the same
# size and fixes it
if col_wrap == 3:
kwargs.setdefault('height', 2.23)
else:
kwargs.setdefault('height', (fig_width / col_wrap) / aspect)
if feature_type == 'pref-period':
if context == 'poster':
aspect = 1.3
else:
kwargs.setdefault('ylim', (0, 2.1))
kwargs.setdefault('xlim', (0, 11.55))
if avg_across_retinal_angle:
pre_boot_gb_func = 'mean'
row = None
else:
pre_boot_gb_func = None
row = 'Retinotopic angle (rad)'
if split_oris:
orientation = np.linspace(0, np.pi, 2, endpoint=False)
else:
orientation = np.linspace(0, np.pi, 4, endpoint=False)
df = analyze_model.create_feature_df(df, reference_frame=reference_frame, gb_cols=gb_cols,
orientation=orientation)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
g = plotting.feature_df_plot(df, col=col, row=row, pre_boot_gb_func=pre_boot_gb_func,
plot_func=plot_func, aspect=aspect,
pre_boot_gb_cols=pre_boot_gb_cols, col_wrap=col_wrap,
facetgrid_legend=facetgrid_legend, **kwargs)
else:
kwargs.update({'all_tick_labels': ['r'], })
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
kwargs.update({'ylabelpad': 10, 'theta_ticklabels': [], 'wspace': .1,
'hspace': .1})
elif context == 'poster':
orientation = np.linspace(0, np.pi, 2, endpoint=False)
kwargs.update({'top': .76, 'r_ticks': [.25, .5, .75, 1], 'wspace': .3,
'r_ticklabels': ['', .5, '', 1], 'ylabelpad': 60,
'hspace': .3})
if feature_type == 'pref-period-contour':
rticks = np.arange(.25, 1.5, .25)
if context == 'paper':
rticklabels = ['' for i in rticks]
else:
rticklabels = [j if j == 1 else '' for i, j in enumerate(rticks)]
if not split_oris:
# there's a weird interaction where if we set the rticks before
# calling scatter (which we do when split_oris is True), it
            # completely messes up the plot. unsure why.
kwargs.update({'r_ticks': rticks, 'r_ticklabels': rticklabels})
df = analyze_model.create_feature_df(df, reference_frame=reference_frame,
eccentricity=[5], orientation=orientation,
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
gb_cols=gb_cols)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
kwargs['ylim'] = (0, 1.25)
row = 'Eccentricity (deg)'
if df[row].nunique() == 1:
row = None
r = 'Preferred period (deg)'
g = plotting.feature_df_polar_plot(df, col=col, row=row,
r=r, plot_func=plot_func, col_wrap=col_wrap,
aspect=aspect,
pre_boot_gb_cols=pre_boot_gb_cols,
facetgrid_legend=facetgrid_legend, **kwargs)
if context == 'paper':
for axes in g.axes:
axes[0].set_ylabel('Preferred\nperiod (deg)')
elif feature_type == 'iso-pref-period':
if context == 'poster':
kwargs.update({'r_ticks': list(range(1, 9)),
'r_ticklabels': [i if i%2==0 else '' for i in range(1, 9)]})
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
df = analyze_model.create_feature_df(df, 'preferred_period_contour', period_target=[1],
reference_frame=reference_frame,
orientation=orientation, gb_cols=gb_cols)
r = 'Eccentricity (deg)'
row = 'Preferred period (deg)'
if df[row].nunique() == 1:
row = None
g = plotting.feature_df_polar_plot(df, col=col, r=r, row=row,
plot_func=plot_func, aspect=aspect,
title='ISO-preferred period contours',
pre_boot_gb_cols=pre_boot_gb_cols,
col_wrap=col_wrap,
facetgrid_legend=facetgrid_legend, **kwargs)
elif feature_type == 'max-amp':
rticks = np.arange(.25, 1.5, .25)
if context == 'paper':
rticklabels = ['' for i in rticks]
else:
rticklabels = [j if j == 1 else '' for i, j in enumerate(rticks)]
if not split_oris:
# there's a weird interaction where if we set the rticks before
# calling scatter (which we do when split_oris is True), it
            # completely messes up the plot. unsure why.
kwargs.update({'r_ticks': rticks, 'r_ticklabels': rticklabels})
df = analyze_model.create_feature_df(df, 'max_amplitude', orientation=orientation,
reference_frame=reference_frame, gb_cols=gb_cols)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
kwargs['ylim'] = (0, 1.15)
r = 'Max amplitude'
g = plotting.feature_df_polar_plot(df, col=col, r=r,
aspect=aspect, plot_func=plot_func,
title='Relative amplitude', col_wrap=col_wrap,
pre_boot_gb_cols=pre_boot_gb_cols,
facetgrid_legend=facetgrid_legend, **kwargs)
ylabel = 'Relative amplitude'
# doesn't look good with multiple rows
if context == 'paper' and col_wrap is None:
# the location argument here does nothing, since we over-ride
# it with the bbox_to_anchor and bbox_transform arguments. the
# size and size_vertical values here look weird because they're
# in polar units (so size is in theta, size_vertical is in r)
asb = AnchoredSizeBar(g.axes[0, 0].transData, 0, '1', 'center',
frameon=False, size_vertical=1,
bbox_to_anchor=(.52, 1),
sep=5,
bbox_transform=g.fig.transFigure)
g.axes[0, 0].add_artist(asb)
ylabel = ylabel.replace(' ', '\n')
for axes in g.axes:
axes[0].set_ylabel(ylabel)
else:
raise Exception(f"Don't know what to do with feature_type {feature_type}!")
if split_oris:
th = np.linspace(0, 2*np.pi, 8, endpoint=False)
r_val = 1 # df[r].mean()
if scatter_ref_pts:
for ax in g.axes.flatten():
ax.scatter(th, len(th)*[r_val], c='k',
s=mpl.rcParams['lines.markersize']**2 / 2)
# for some reason, can't call the set_rticks until after all
# scatters have been called, or they get messed up
for ax in g.axes.flatten():
ax.set_yticks(rticks)
ax.set_yticklabels(rticklabels)
else:
adjust_kwargs.update({'wspace': -.1, 'hspace': .15})
if context == 'paper':
for ax in g.axes.flatten():
if ax.get_xlabel():
ax.set_xlabel(ax.get_xlabel(), labelpad=-5)
# remove the xlabel from one of them and place the remaining one in
# between the two subplots, because it's redundant
g.axes[0, 0].set_xlabel('')
# this can have its xlabel removed, since it will be above another plot which has one
if feature_type == 'pref-period-contour':
g.axes[0, 1].set_xlabel('')
else:
g.axes[0, 1].set_xlabel(g.axes.flatten()[1].get_xlabel(), x=-.05,
ha='center', labelpad=-5)
title_kwargs['pad'] = -13
if visual_field != 'all':
g.fig._suptitle.set_text(g.fig._suptitle.get_text() + f' in {visual_field} visual field')
if not suptitle:
g.fig.suptitle('')
if not axes_titles:
for ax in g.axes.flatten():
ax.set_title('')
else:
g.set_titles(col_template="{col_name}", **title_kwargs)
g.tight_layout()
g.fig.subplots_adjust(**adjust_kwargs)
return g
def existing_studies_with_current_figure(df, seed=None, precision_df=None, y="Preferred period (deg)",
context='paper'):
"""Plot results from existing studies with our results
This is the same plot as `existing_studies_figure()`, with the
results from our study plotted as a black line (so see that figure
for more details).
Note that the `df` argument here is the dataframe containing results
from this study, NOT the results from previous studies (we call the
`existing_studies_df()` function here)
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects.
seed : int or None
seed for numpy's RNG. can only be None if precision_df is None
    precision_df : pd.DataFrame or None, optional
dataframe containing the precision for each scanning session in
df. If None, we won't do any bootstrapping, and so assume this
already has only one subject
y : {'Preferred period (deg)', 'Preferred spatial frequency (cpd)'}
Whether to plot the preferred period or preferred spatial
frequency on the y-axis. If preferred period, the y-axis is
linear; if preferred SF, the y-axis is log-scaled (base 2). The
ylims will also differ between these two
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
The FacetGrid containing the plot
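    Examples
    --------
    A minimal sketch (not a tested doctest); ``df`` and ``precision_df`` stand
    in for the dataframes described above:
        g = existing_studies_with_current_figure(df, seed=0,
                                                 precision_df=precision_df)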
"""
# this gets us the median parameter value for each subject and fit
# model type
df = df.groupby(['subject', 'model_parameter', 'fit_model_type']).median().reset_index()
if precision_df is not None:
df = df.merge(precision_df, on=['subject'])
df = precision_weighted_bootstrap(df, seed, 100, 'fit_value', ['model_parameter', 'fit_model_type'],
'precision')
gb_cols = [c for c in ['subject', 'bootstrap_num'] if c in df.columns]
df = analyze_model.create_feature_df(df, reference_frame='relative', gb_cols=gb_cols)
df = df.groupby(['subject', 'reference_frame', 'Eccentricity (deg)', 'bootstrap_num']).agg('mean').reset_index()
df['Preferred spatial frequency (cpd)'] = 1 / df['Preferred period (deg)']
g = existing_studies_figure(existing_studies_df(), y, False, context)
_, line, _ = plotting.scatter_ci_dist('Eccentricity (deg)', y, data=df,
color='k', join=True, ax=g.ax,
linewidth=1.5*plt.rcParams['lines.linewidth'],
ci=68, estimator=np.median,
draw_ctr_pts=False, ci_mode='fill');
data = g._legend_data.copy()
data['Current study'] = line[0]
g.add_legend(data, label_order=g.hue_names + ['Current study'])
# facetgrid doesn't let us set the title fontsize directly, so need to do
# this hacky work-around
g.fig.legends[0].get_title().set_size(mpl.rcParams['legend.title_fontsize'])
return g
def mtf(mtf_func, df=None, context='paper'):
"""Plot the MTF as a function of spatial frequencies
This plots the function we use to invert the display MTF when constructing
our stimuli. We plot a semilogx plot, from 1/512 to 1/2 cycles per pixel,
labeled as pixels per period (the reciprocal of spatial frequency), with
y-values going from .5 to 1
Parameters
----------
mtf_func : function
python function that takes array of spatial frequencies as its only
argument and returns the MTF at those spatial frequencies.
df : pd.DataFrame or None, optional
If not None, the data used to fit this function, which we'll plot as
points on the figure.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.figure
Figure containing the MTF plot
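    Examples
    --------
    A minimal sketch (not a tested doctest). The lambda is a toy linear
    falloff, purely illustrative; in practice mtf_func comes from fitting the
    display measurements:
        fig = mtf(lambda sf: 1 - .8 * sf)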
"""
sfs = np.linspace(0, .5)
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_width*.65))
ax.semilogx(sfs, mtf_func(sfs), 'C0', basex=2)
if df is not None:
ax.semilogx(df.display_freq, df.corrected_contrast, 'C0o', basex=2)
ticks = [512, 128, 32, 8, 2]
ax.set(xticks=[1/i for i in ticks], xticklabels=ticks, xlabel='Pixels per period',
ylabel='Michelson contrast', yticks=[.5, .75, 1])
fig.tight_layout()
return fig
def sigma_interpretation(df):
"""Generate string interpreting relative size of a, b, and sigma.
This function returns a string (meant to be printed or saved to txt file)
that describes the preferred period at 0, the standard deviation, and how
many degrees you'd have to move in order to shift your preferred period by
a single standard deviation.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns.
Returns
-------
result : str
string containing the description discussed above
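    Examples
    --------
    A minimal sketch (not a tested doctest), printing the interpretation for a
    parameter dataframe ``df`` (after prep_model_df):
        print(sigma_interpretation(df))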
"""
    # get the median value of the parameters we're interested in
median_params = df.groupby('model_parameter').fit_value.median()
a = median_params['$a$']
b = median_params['$b$']
    sigma = median_params[r'$\sigma$']
n_degrees = (b * (2**sigma - 1)) / a
pref_period_there = b + n_degrees * a
# as described on the wiki page for FWHM:
# https://en.wikipedia.org/wiki/Full_width_at_half_maximum. That's for a
# regular Gaussian, but the same calculation works here, just in octave
# units (as equivalent to $\log_2(SF_{.5H} / SF_{.5L})$, where those SFs
# are the spatial frequency where the curve reaches half-max above and
# below the peak, respectively)
fwhm = 2*np.sqrt(2*np.log(2)) * sigma
result = (
f"Preferred period at 0 degrees is {b:.03f}, with slope {a:.03f}.\n"
f"Standard deviation of the log-Gaussian is {sigma:.03f} octaves (equivalent to FWHM of {fwhm:.03f} octaves).\n"
f"Therefore, you'd need to move to {n_degrees:.03f} degrees eccentricity to move by a std dev.\n"
f"At that eccentricity, preferred period is {pref_period_there:.03f}.\n"
"All this is calculated using the median across bootstraps, average across polar angle and orientations."
)
return result
def compare_cv_models(first_level_df, targets, predictions, model_names, loss_func='normed_loss',
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border',
context='paper', voxel_n_check=9):
"""Create plots to help understand differences in model performance.
This creates several plots to compare the predictions of different models.
We make pairwise comparisons between each of them:
1. Plot pairwise difference in loss as a function of eccentricity (each
comparison on a separate row) (1 plot).
2. Plot the `voxel_n_check` voxels that are the best for each model in each
pairwise comparison (2 plots per pairwise comparison). We plot the voxel
response as a function of spatial frequency, and then curves for each
model. This means that we're collapsing across stimulus orientation
(variation in those responses just shown as confidence intervals).
Because we're only plotting response as a function of spatial frequency
(and not of stimulus orientation), this is really only sufficient for
comparing models 1 to 3, those models whose responses are isotropic.
Modification to this would be necessary to make informative plots for the
other models.
Parameters
----------
first_level_df : pd.DataFrame
DataFrame containing the responses of each voxel to each stimulus. Note
that we only use the median response, so the summary dataframe (vs
full, which includes separate bootstraps) should be used.
targets : torch.tensor
tensor containing the targets for the model, i.e., the responses and
precision of the voxels-to-fit, as saved out by
sfp.analyze_model.calc_cv_error
predictions : list
list of tensors containing the predictions for each model, as saved out
by sfp.analyze_model.calc_cv_error
model_names : list
list of strings containing the names (for plotting purposes) of each
model, in same order as predictions.
loss_func : str, optional
The loss function to compute. One of: {'weighted_normed_loss',
'crosscorrelation', 'normed_loss', 'explained_variance_score',
'cosine_distance', 'cosine_distance_scaled'}.
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
voxel_n_check : int, optional
Number of voxels to plot in second plot type. As you get farther away
from default value (9), more likely that plot will look weird.
Returns
-------
figs : list
List containing the created figures
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
first_level_df = df_filter(first_level_df)
voxels = first_level_df.drop_duplicates('voxel')
voxels['voxel_new'] = np.arange(len(voxels))
tmp = first_level_df.set_index('voxel')
tmp['voxel_new'] = voxels['voxel_new']
first_level_df = tmp.reset_index()
for name, pred in zip(model_names, predictions):
loss = analyze_model._calc_loss(pred, targets, loss_func, False)
voxels[f'{name}_loss'] = loss
# this is the number of combinations of the values in model names with
# length 2. for some reason, itertools objects don't have len()
n_combos = int(math.factorial(len(model_names)) / 2 /
math.factorial(len(model_names)-2))
fig, axes = plt.subplots(n_combos, 2, squeeze=False,
figsize=(fig_width, n_combos/2*fig_width))
predictions = dict(zip(model_names, predictions))
voxel_comp_figs = []
for i, (name_1, name_2) in enumerate(itertools.combinations(model_names, 2)):
loss_name = f'{name_1}_loss - {name_2}_loss'
voxels[loss_name] = voxels[f'{name_1}_loss'] - voxels[f'{name_2}_loss']
ymax = voxels[loss_name].max() + voxels[loss_name].max() / 10
ymin = voxels[loss_name].min() + voxels[loss_name].min() / 10
sns.scatterplot(x='eccen', y=loss_name, data=voxels, ax=axes[i, 0])
axes[i, 0].set_ylim(ymin, ymax)
sns.regplot(x='eccen', y=loss_name, data=voxels, ax=axes[i, 1],
x_estimator=np.median, x_bins=50)
axes[i, 1].set(ylabel='')
axes[i, 0].hlines(0, voxels.eccen.min(), voxels.eccen.max(), linestyles='dashed')
axes[i, 1].hlines(0, voxels.eccen.min(), voxels.eccen.max(), linestyles='dashed')
vox_idx = voxels[loss_name].values.argsort()
vox_idx = np.concatenate([vox_idx[-voxel_n_check:], vox_idx[:voxel_n_check]])
tmp = first_level_df.query(f"voxel_new in @vox_idx")
data = []
for j, v in enumerate(vox_idx):
d = {}
for name in model_names:
pred = predictions[name]
val = pred[v]
# need to normalize predictions for comparison
val = val / val.norm(2, -1, True)
d[name] = val.detach()
d['voxel_new'] = v
d['stimulus_class'] = np.arange(48)
d['better_model'] = {True: name_2, False: name_1}[j < voxel_n_check]
data.append(pd.DataFrame(d))
t = pd.concat(data)
tmp = tmp.merge(t, 'left', on=['voxel_new', 'stimulus_class'],
validate='1:1', )
tmp = tmp.rename(columns={'amplitude_estimate_median_normed':
'voxel_response'})
tmp = pd.melt(tmp, ['voxel_new', 'local_sf_magnitude', 'stimulus_class',
'better_model', 'eccen'],
value_vars=['voxel_response'] + model_names,
var_name='model', value_name='response')
for name, other_name in zip([name_1, name_2], [name_2, name_1]):
g = sns.relplot(x='local_sf_magnitude', y='response',
data=tmp.query(f"better_model=='{name}'"),
hue='model', col='voxel_new', kind='line',
col_wrap=3, height=fig_width/3)
g.fig.suptitle(f'better_model = {name} (vs {other_name})')
if voxel_n_check > 6:
g.fig.subplots_adjust(top=.9)
elif voxel_n_check > 3:
g.fig.subplots_adjust(top=.85)
else:
g.fig.subplots_adjust(top=.75)
g.set(xscale='log')
for ax in g.axes.flatten():
            vox_id = int(re.findall(r'\d+', ax.get_title())[0])
ax.set_title(ax.get_title() + f",\neccen = {tmp.query('voxel_new==@vox_id').eccen.unique()[0]:.02f}")
voxel_comp_figs.append(g.fig)
fig.tight_layout()
return [fig] + voxel_comp_figs
def theory_background_figure(context):
"""Create figure with some small info on background theory.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
fig : plt.figure
Figure containing this plot
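    Examples
    --------
    A minimal sketch (not a tested doctest); only the paper context has been
    tested:
        fig = theory_background_figure('paper')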
"""
einstein_path = op.join(op.dirname(op.realpath(__file__)), '..', 'reports', 'figures',
'einstein.pgm')
einstein = plt.imread(einstein_path)
einstein = einstein / einstein.max()
params, fig_width = style.plotting_style(context, figsize='full')
params['axes.titlesize'] = '8'
params['axes.labelsize'] = '8'
params['legend.fontsize'] = '8'
warnings.warn("We adjust the font size for axes titles, labels, and legends down to "
"8pts (so this will probably look wrong if context is not paper)!")
plt.style.use(params)
fig = plt.figure(figsize=(fig_width, fig_width/2))
gs = fig.add_gridspec(4, 4, hspace=.65)
fig.add_subplot(gs[:2, 0])
fig.add_subplot(gs[2:, 0])
fig.add_subplot(gs[1:3, 1])
fig.add_subplot(gs[1:3, -2])
fig.add_subplot(gs[:2, -1])
fig.add_subplot(gs[2:, -1])
axes = np.array(fig.axes).flatten()
for ax in axes[:2]:
ax.axis('off')
pt.imshow((einstein+.5)/1.5, ax=axes[0], zoom=110/256, title=None,
vrange=(0, 1))
pt.imshow((einstein+.5)/1.5, ax=axes[1], zoom=110/256, title=None,
vrange=(0, 1))
axes[0].set_title(r'SF preferences $\bf{constant}$'+'\nacross visual field')
axes[1].set_title(r'SF preferences $\bf{scale}$'+'\nwith eccentricity')
ecc = np.linspace(.01, 20, 50)
V1_pRF_size = 0.063485 * ecc
constant_hyp = 2*np.ones(len(ecc))
pal = sns.color_palette('Dark2', n_colors=2)
for i, ax in enumerate(axes[2:4].flatten()):
if i == 0:
ax.semilogy(ecc, 1./V1_pRF_size, '-', label='scaling',
linewidth=2, basey=2, c=pal[0])
ax.set_ylim((.25, 10))
ax.plot(ecc, constant_hyp, c=pal[1], linewidth=2, label='constant')
ax.set(xticks=[], yticks=[], ylabel='Preferred SF (cpd)',
xlabel='Eccentricity')
elif i == 1:
ax.plot(ecc, V1_pRF_size, linewidth=2, label='scaling', c=pal[0])
ax.plot(ecc, 1./constant_hyp, c=pal[1], linewidth=2, label='constant')
ax.set(xlabel='Eccentricity', xticks=[], yticks=[],
ylabel='Preferred period (deg)')
axes[3].legend(frameon=False, bbox_to_anchor=(-.1, -.1), loc='upper center')
axes[3].annotate('', xy=(.5, 1), xytext=(-.65, 1), xycoords='axes fraction',
arrowprops={'arrowstyle': '<->', 'color': 'k',
'connectionstyle': 'arc3,rad=-.3'})
axes[3].text(-.075, 1.2, r'$\frac{1}{f(x)}$', ha='center', va='bottom',
transform=axes[3].transAxes)
# from Eero, this is about what it should be
V1_RF_size = .2 * ecc
V1_pRF_size_slope = 0.063485
V1_pRF_size_offset = 0
V1_pRF_size_error = 0.052780
for i, ax in enumerate(axes[4:].flatten()):
ax.fill_between(ecc, (V1_pRF_size_slope - V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
(V1_pRF_size_slope + V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
alpha=.1, color=pal[0])
ax.plot(ecc, V1_pRF_size_slope*ecc+V1_pRF_size_offset, linewidth=2, label='scaling', c=pal[0])
if i == 0:
for e in [1,5,10,15,20]:
ax.plot([0, 20], [V1_pRF_size_slope*e+V1_pRF_size_offset,
V1_pRF_size_slope*e+V1_pRF_size_offset], '--', c='k',
linewidth=1)
ax.set(title="Full-field gratings", xticks=[], yticks=[])
if i == 1:
for j in [-1, -.5, 0, .5, 1]:
ax.plot(ecc, (V1_pRF_size_slope + j*V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
linestyle='--', c='k', linewidth=1)
ax.set(xlabel='Eccentricity', xticks=[], yticks=[], title='Scaled gratings')
ax.set_ylabel("Preferred period (deg)")
return fig
def voxel_exclusion(df, context='paper'):
"""Create plot showing how many voxels were excluded from model fitting.
WARNING: Currently this is not context-compliant -- the figure ends up much
    wider than allowed. If we want to use this in the paper, we will change that.
Parameters
----------
df : pd.DataFrame
dataframe containing the voxel exclusion info, as created by the
snakemake rule voxel_exclusion_df
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
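    Examples
    --------
    A minimal sketch (not a tested doctest); ``df`` stands in for the output
    of the voxel_exclusion_df snakemake rule:
        g = voxel_exclusion(df)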
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
if 'ecc in 1-12,drop_voxels_with_any_negative_amplitudes' in df.columns:
arg_str = 'any'
elif 'ecc in 1-12,drop_voxels_with_mean_negative_amplitudes' in df.columns:
arg_str = 'mean'
neg = df['ecc in 1-12'] - df[f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes']
border = df['ecc in 1-12'] - df['ecc in 1-12,drop_voxels_near_border']
df[f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border - independent'] = df['ecc in 1-12'] - (neg + border)
neg_prop = dict(zip(df.subject, neg / df['ecc in 1-12']))
neg = dict(zip(df.subject, neg))
map_dict = {'total_voxels': 0,
'ecc in 1-12': 1,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes': 2,
'ecc in 1-12,drop_voxels_near_border': 3,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border': 4,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border - independent': 5}
id_vars = [c for c in df.columns if c not in map_dict.keys()]
df = pd.melt(df, id_vars, value_name='number_of_voxels')
df['exclusion_criteria'] = df.variable.map(map_dict)
col_order = plotting.get_order('subject', col_unique=df.subject.unique())
g = sns.catplot(x='exclusion_criteria', y='number_of_voxels', data=df,
col='subject', kind='point', col_wrap=6, aspect=.5,
height=(1/.5)*(2*fig_width/6), col_order=col_order)
for ijk, data in g.facet_data():
ax = g.axes[ijk[1]]
ax.scatter(4, data.query('exclusion_criteria==4').number_of_voxels, c='r', zorder=100)
txt = '\n'.join([f'{v}: {k}' for k,v in map_dict.items()])
g.fig.text(1, .75, txt, va='center')
txt = '\n'.join([f'{s}: {neg[s]} ({neg_prop[s]:.3f})' for s in col_order])
txt = "Number of voxels dropped because of negative amplitude (proportion on stimuli)\n\n" + txt
g.fig.text(1, .25, txt, va='center')
return g
def _create_model_prediction_df(df, trained_model, voxel_label,
for_relative_plot=False,
extend_sf=False):
"""Create df containing model predictions for a single voxel
    By default this contains 48 rows, with the following columns:
    model_predictions (normed predictions of trained_model to the spatial
    frequency seen by this voxel), voxel (voxel_label), and stimulus_class (0
    to 47, giving the stimulus label). If for_relative_plot or extend_sf is
    True, we instead evaluate the model at 36 log-spaced frequencies for each
    of the four main orientations (adding a local_sf_magnitude column and, for
    for_relative_plot, a peak_sf column giving the preferred spatial frequency
    of this voxel at each observed orientation).
Parameters
----------
df : pd.DataFrame
DataFrame containing the responses of a single voxel to stimuli. Should
only have one response per stimulus (thus, the summary df), and must
have columns eccen, angle, local_sf_magnitude, and local_sf_xy_direction.
trained_model : sfp.model.LogGaussianDonut
Trained model whose responses we want to get.
voxel_label : str
The label for this voxel.
for_relative_plot : bool, optional
If True, will add a column giving the peak spatial frequency for this
voxel at each observed orientation and evaluate the model at 36
frequencies log-spaced from two decades below to two decades above the
peak (rather than the presented frequencies), at the four main
orientations.
extend_sf : bool, optional
If True, we instead generate predictions for local spatial frequencies
from .01 to 100 cpd (logspaced, 36 samples), for the four main angles.
Cannot be True if for_relative_plot is True.
Returns
-------
data : pd.DataFrame
DataFrame containing the above info
"""
data = {}
assert df.eccen.nunique() == 1 and df.angle.nunique() == 1, "_create_model_prediction_df must be called on the df with responses to a single voxel!"
sfs = df.drop_duplicates('stimulus_class')[['local_sf_magnitude',
'local_sf_xy_direction']]
sfs = torch.tensor(sfs.values)
prf_loc = torch.tensor(df[['eccen', 'angle']].values)
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1], prf_loc[:, 0], prf_loc[:, 1])
predictions_norm = predictions.norm(2, -1, True)
if extend_sf:
if for_relative_plot:
raise Exception("At most one of for_relative_plot and extend_sf can be true, but both were true!")
# get the 4 main orientations
angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
angles = df.query('freq_space_angle in @angles').drop_duplicates('freq_space_angle')
angles = angles.local_sf_xy_direction.values
n_samps = 36
freqs = []
for a in angles:
freqs.extend(np.logspace(-2, 2, n_samps))
sfs = torch.tensor([freqs, np.concatenate([n_samps*[a] for a in angles])]).transpose(0, 1)
data['local_sf_magnitude'] = sfs[:, 0].detach().numpy()
# we use the same norm as before, in order to make sure things line up correctly
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1],
prf_loc[0, 0], prf_loc[0, 1])
elif for_relative_plot:
# get the 4 main orientations
angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
angles = df.query('freq_space_angle in @angles').drop_duplicates('freq_space_angle')
angles = angles.local_sf_xy_direction.values
peak_sf = []
freqs = []
n_samps = 36
for a in angles:
peak_sf.append(trained_model.preferred_sf(a, prf_loc[0, 0], prf_loc[0, 1]).item())
freqs.extend(np.logspace(np.log10(peak_sf[-1]/100), np.log10(peak_sf[-1]*100), n_samps))
sfs = torch.tensor([freqs, np.concatenate([n_samps*[a] for a in angles])]).transpose(0, 1)
peak_sf = np.concatenate([n_samps*[p] for p in peak_sf])
data['peak_sf'] = peak_sf
data['local_sf_magnitude'] = sfs[:, 0].detach().numpy()
# we use the same norm as before, in order to make sure things line up correctly
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1],
prf_loc[0, 0], prf_loc[0, 1])
else:
data['stimulus_class'] = np.arange(48)
data['model_predictions'] = (predictions / predictions_norm).detach().squeeze()
data['voxel'] = voxel_label
return pd.DataFrame(data)
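# Illustrative usage sketch (added; the variable names below are assumptions, not
# part of this module): given a summary first-level dataframe `df` and a trained
# sfp.model.LogGaussianDonut `donut`, the default call returns one normed
# prediction per stimulus class for a single voxel:
# preds = _create_model_prediction_df(df.query('voxel == 2310'), donut, 2310)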
def _remap_frequencies(df, freq_mag_col='local_sf_magnitude'):
"""Create plotting_sf column in df
for each voxel, our stimuli have several orientations. ideally, these
orientations would all have the exact same spatial frequency, but they
don't (the w_r/w_a parameters needed to be integers in order to avoid
obvious artifacts at polar angle 0). for plotting purposes, this is
confusing, so we map those values such that they are identical, and the
binning that gets done later on then makes more sense.
This adds a column, plotting_sf, which contains this info.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
freq_mag_col : str, optional
Name of the column with the spatial frequencies to remap.
Returns
-------
df : pd.DataFrame
the dataframe with plotting_sf column added.
"""
canonical_freqs = [f for f in df.freq_space_distance.unique() if f == int(f)]
canonical_freq_mapper = {f: min(canonical_freqs, key=lambda x: abs(x-f))
for f in df.freq_space_distance.unique()}
freq_mapper = df.groupby(['voxel', 'freq_space_distance'])[freq_mag_col].median().to_dict()
df['plotting_sf'] = df.apply(lambda x: freq_mapper[x.voxel,
canonical_freq_mapper[x.freq_space_distance]],
axis=1)
return df
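# Added illustration (hypothetical numbers): if one voxel saw freq_space_distance
# values 6.0 and 6.08 (nominally the same frequency, differing only because
# w_r/w_a must be integers), both rows get plotting_sf equal to that voxel's
# median local_sf_magnitude at the canonical frequency 6, so they land in the
# same bin downstream.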
def _merge_model_response_df(df, model_predictions):
"""Merge dfs with model predictions and voxel responses.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
model_predictions : pd.DataFrame
DataFrame containing the model predictions for each voxel in df.
Returns
-------
df : pd.Dataframe
The merged dataframe
"""
try:
df = df.merge(model_predictions, 'left', on=['voxel', 'stimulus_class'],
validate='1:1', )
df = df.rename(columns={'amplitude_estimate_median_normed':
'voxel_response'})
df = pd.melt(df, ['voxel', 'stimulus_class', 'eccen', 'freq_space_angle',
'local_sf_magnitude', 'plotting_sf'],
value_vars=['voxel_response', 'model_predictions'],
var_name='model', value_name='Response (a.u.)')
except KeyError:
# in this case, we're combining the relative ones, so model_predictions
# doesn't have a stimulus_class column (and they're evaluated at
# different frequencies)
df = df[['voxel', 'local_sf_magnitude', 'amplitude_estimate_median_normed',
'peak_sf', 'subject']]
df['model'] = 'voxel_response'
df = df.rename(columns={'amplitude_estimate_median_normed': 'Response (a.u.)'})
model_predictions = model_predictions.rename(columns={'model_predictions':
'Response (a.u.)'})
model_predictions['model'] = 'model_predictions'
df = pd.concat([df, model_predictions], sort=False)
return df
def _voxel_responses_and_predictions(*args, label='', n_bins=10, plot_type='reg', **kwargs):
"""Plot voxel responses and model predictions.
If label=voxel_response, we use sns.regplot (if plot_type=='reg', with
n_bins bins on the x axis) or sns.histplot (if plot_type='hist', logscaling
the x-axis). Else, we use sns.lineplot
"""
if label == 'voxel_response':
if plot_type == 'reg':
# there are 22 unique frequencies (freq_space_distance in the
# dataframe), but only 10 "real" ones, the others are just off by a
# little bit (because w_a/w_r needed to be whole numbers)
return sns.regplot(*args, x_bins=n_bins,
fit_reg=False, label=label,
scatter_kws={'s': 10}, **kwargs)
elif plot_type == 'hist':
to_return = sns.histplot(*args, label=label,
log_scale=(True, False),
# rasterize to decrease size
rasterized=True,
**kwargs)
# set xscale back to linear because apparently sns.histplot sets it
# for all axes, and we want the next facet to have linear xscale
# for when sns.lineplot is called
plt.xscale('linear')
return to_return
else:
return sns.lineplot(*args, label=label, **kwargs, zorder=10)
def example_voxels(df, trained_model, voxel_idx=[2310, 2957, 1651],
extend_sf=False, context='paper'):
"""Plot some example voxel data and their model fit.
For some voxels and a trained model, plot some comparisons between the
measured voxel responses and the model's predictions. Each voxel gets its
own column. Nothing is done here to choose the voxels, so that must be done
externally.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
trained_model : sfp.model.LogGaussianDonut
Trained model whose responses we want to show.
voxel_idx : list, optional
List of voxel ids (i.e., values from the 'voxel' column of df) to show.
Should be selected somehow in order to make sure they're reasonably
nice. The default values are for sub-wlsubj001, ses-04, and are roughly
foveal, parafoveal, and peripheral, all reasonably well fit by the full
model. Regardless of how many are here, we'll have 3 columns per row.
extend_sf : bool, optional
If True, we instead generate predictions for local spatial frequencies
from .01 to 100 cpd (logspaced, 36 samples), for the four main angles.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
ax_height = (fig_width / 4) / .75
df = df.query("voxel in @voxel_idx")
data = []
voxel = df.drop_duplicates('voxel')
eccen_order = voxel.sort_values('eccen').voxel.values
for i, v in enumerate(voxel_idx):
data.append(_create_model_prediction_df(df.query('voxel==@v'),
trained_model, v,
extend_sf=extend_sf))
data = pd.concat(data)
canonical_freqs = [f for f in df.freq_space_distance.unique() if f == int(f)]
df = _remap_frequencies(df)
if extend_sf:
df['model'] = 'voxel_response'
df = df.rename(columns={'amplitude_estimate_median_normed':
'Response (a.u.)'})
xlim = (.01, 100)
ylim = (0, .225)
yticks = [0, .2]
else:
df = _merge_model_response_df(df, data)
xlim = (.1, 10)
ylim = (.05, .225)
yticks = []
g = sns.FacetGrid(hue='model', data=df, col='voxel', col_wrap=3,
col_order=eccen_order, height=ax_height, aspect=.75)
g.map_dataframe(_voxel_responses_and_predictions, x='plotting_sf',
y='Response (a.u.)', n_bins=len(canonical_freqs),
plot_type='reg')
for i, ax in enumerate(g.axes.flatten()):
        vox_id = int(re.findall(r'voxel = (\d+)', ax.get_title())[0])
ax.set_title(f"eccentricity = {df.query('voxel==@vox_id').eccen.unique()[0]:.02f}")
# when extend_sf is True, we plot model predictions separately, because
# merging the two dfs was too difficult
if extend_sf:
sns.lineplot(ax=ax, c='C1', label='model_prediction',
x='local_sf_magnitude', y='model_predictions',
data=data.query("voxel==@vox_id"), zorder=10)
ax.legend_.remove()
if i == 0:
ax.set(ylabel='Response (a.u.)')
if i != 1:
ax.set(xlabel='')
else:
ax.set(xlabel='Local spatial frequency (cpd)')
g.set(xscale='log', ylim=ylim, yticks=yticks, xlim=xlim)
return g
def example_eccentricity_bins(df, context='paper'):
"""Plot some example eccentricity bins and their tuning curves.
This plots the amplitude estimates and the tuning curves for a single
subject, angular and radial stimuli, eccentricity bins 02-03 and 10-11. It
is meant to show that the tuning curves fit the bins reasonably well.
Parameters
----------
df : pd.DataFrame
pandas DataFrame containing the 1d tuning curves for a single subject.
Must be the summary version (containing the fits to the median across
bootstraps)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
"""
    params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if context == 'paper':
# this isn't the exact same as what you'd get doing the line below,
# because we use a relatively small wspace (space between axes), that
# lets us make them a bit bigger
height = 2.7
else:
height = (fig_width / 2) / .7
df = df.query("frequency_type=='local_sf_magnitude' & "
"stimulus_superclass in ['angular', 'radial'] & "
"eccen in ['02-03', '09-10']")
df.eccen = df.eccen.map(lambda x: {'02-03': '2-3',
'09-10': '9-10'}.get(x, x))
df = df.rename(columns={'eccen': "Eccentricity band"})
pal = plotting.get_palette('stimulus_type', 'relative',
df.stimulus_superclass.unique(),
True)
fig, axes = plt.subplots(nrows=1, ncols=2, squeeze=True, sharey=True,
figsize=(.6*2*height, height),
gridspec_kw={'wspace': .1})
artists = [axes[0].scatter([], [], s=0)]
labels = ['Eccentricity band']
for i, (col, data) in enumerate(df.groupby('stimulus_superclass')):
ax = axes[i]
for s, d in data.groupby('Eccentricity band'):
hue = pal[col]
ax.scatter(d.frequency_value, d.amplitude_estimate,
facecolor={'2-3': 'w'}.get(s, hue), edgecolor=hue)
plotting.plot_tuning_curve(data=d, ax=ax, xlim='data',
style='Eccentricity band', color=hue,
dashes_dict={'2-3': (2, 2)})
if i == 1:
artists.append(ax.plot([], [], color='k',
mfc={'2-3': 'w'}.get(s, 'k'),
mec='k', marker='o',
dashes={'2-3': (2, 2)}.get(s, ''))[0])
labels.append(s+' deg')
ax.set_xscale('log', basex=10)
ax.set(xticks=[10**i for i in [-1, 0, 1]], ylim=(1, 3.5),)
if i == 0:
ax.set(ylabel='Response\n(% BOLD signal change)',
yticks=np.arange(1, 4.5, .5))
ax.set_xlabel('Local spatial frequency (cpd)', ha='center',
x=1)
fig.legend(artists, labels, frameon=False, bbox_to_anchor=(1, .5),
bbox_transform=fig.transFigure, loc='center left',
borderaxespad=0)
return fig
def stimulus_schematic(stim, stim_df, context='paper'):
"""Create schematic with some example stimuli.
Shows the two lowest frequencies from each of the four main stimulus types,
with some annotations.
This works with any of the stimuli created by this project: log-scaled or
constant, rescaled or not.
Parameters
----------
stim : np.ndarray
array containing the stimuli
stim_df : pd.DataFrame
dataframe containing the description of the stimuli.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
--------
fig : plt.figure
Figure containing this plot
"""
params, fig_width = style.plotting_style(context, figsize='half')
params['font.size'] = '8'
params['axes.titlesize'] = '8'
params['axes.labelsize'] = '8'
params['legend.fontsize'] = '8'
warnings.warn("We adjust the font size for axes titles, labels, and legends down to "
"8pts (so this will probably look wrong if context is not paper)!")
plt.style.use(params)
fig, axes = plt.subplots(3, 4, figsize=(fig_width, .75*fig_width))
# for pt.imshow to work, we need to find an integer that, when the size of
# the image is divided by it, we get another integer (so that we
# down-sample correctly)
zoom = int(stim.shape[-1] / axes[0, 0].bbox.height)
    # keep incrementing zoom until it evenly divides stim.shape[-1] (i.e., until
    # math.gcd(stim.shape[-1], zoom) == zoom), so the down-sampling is exact
while math.gcd(stim.shape[-1], zoom) != zoom:
zoom += 1
stim_df = first_level_analysis._add_freq_metainfo(stim_df.drop_duplicates('class_idx'))
# drop baseline and the off-diagonal stimuli
stim_df = stim_df.query("stimulus_superclass not in ['baseline', 'mixtures', 'off-diagonal']")
if 'angular' in stim_df.stimulus_superclass.unique():
col_order = ['radial', 'angular', 'forward spiral', 'reverse spiral']
stim_type = 'relative'
elif 'vertical' in stim_df.stimulus_superclass.unique():
col_order = ['vertical', 'horizontal', 'forward diagonal', 'reverse diagonal']
stim_type = 'absolute'
pal = plotting.get_palette('stimulus_type', stim_type, col_order, True)
for i, stim_type in enumerate(col_order):
# get the lowest and second frequencies from each stimulus type (any
# higher and it starts to alias at this resolution)
g = stim_df.query("stimulus_superclass==@stim_type").iloc[[0, 2]]
for ax, g_j in zip(axes[:, i], g.iterrows()):
pt.imshow(stim[g_j[1]['index']], ax=ax, zoom=1/zoom, title=None)
ax.set_frame_on(False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
axes[-1, i].set_visible(False)
# use the less-ambiguous label
plot_label = plotting.SUPERCLASS_PLOT_LABELS.get(stim_type, stim_type)
axes[0, i].set_title(plot_label.replace(' ', '\n'), rotation=0, va='bottom',
bbox={'fc': 'none', 'ec': pal[stim_type], 'pad': 2})
fig.text(.515, 1/3.5, '...', transform=fig.transFigure, fontsize='xx-large',
va='center', ha='center')
axes[0, 0].text(0, .5, 'Low base\nfrequency', transform=axes[0, 0].transAxes,
rotation=90, ha='right', va='center', multialignment='center')
# we do this using axes[0, 0] because axes[2, 0] is invisible, but we can
# still use its transform
axes[0, 0].text(0, .5, 'High base\nfrequency', transform=axes[2, 0].transAxes,
rotation=90, ha='right', va='center')
axes[1, 0].annotate('', xy=(-.2, 1), xytext=(-.2, 0), textcoords='axes fraction',
xycoords='axes fraction',
arrowprops={'arrowstyle': '<-', 'color': '0',
'connectionstyle': 'arc3'})
axes[1, -1].annotate('', xy=(1.035, -.1), xytext=(-.035, -.1), textcoords='axes fraction',
xycoords='axes fraction',
arrowprops={'arrowstyle': '-', 'color': '0',
'connectionstyle': 'arc3'})
axes[1, -1].text(.5, -.2, '24\u00B0', transform=axes[1, -1].transAxes,
ha='center', va='top')
fig.subplots_adjust(wspace=.05, hspace=.05)
return fig
def peakiness_check(dfs, trained_models, col='subject', voxel_subset=False,
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border',
context='paper'):
"""Plot all voxels responses to check peakiness.
The x value here is spatial frequency relative to peak (based on the
model). This allows us to see whether the responses are "peakier" than the
model, which would tell us that, instead of the exp(-x^2) in a Gaussian, we
should be using a smaller exponent, e.g., exp(-x^(1.5))
Parameters
----------
dfs : pd.DataFrame or list
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps). If a list, a list of those (one per subject). Should
contain a subject column
trained_models : sfp.model.LogGaussianDonut or list
Trained model whose responses we want to show. If a list, a list of
those (one per subject).
col : str or None, optional
The column of the dataframe to facet columns on
voxel_subset : bool or int, optional
if True, we only do this for 10 voxels, to test it out (since this
will take a while). If an int, we do it for that many voxels.
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='full')
# for some reason, we also draw the grid in this figure, even when
# axes.grid is set to False. so, we manually set the linestyle to nothing
# in order to avoid drawing the grid lines. (we don't want to do this in
# the style params because we *do* want to draw the grid for polar plots)
params['grid.linestyle'] = ''
plt.style.use(params)
ax_height = (fig_width / 4) / .75
if not isinstance(dfs, list):
dfs = [dfs]
if not isinstance(trained_models, list):
trained_models = [trained_models]
df_overall = []
for df, trained_model in zip(dfs, trained_models):
if 'subject' not in df.columns:
# this way it will run even if there's no subject specified
df['subject'] = 'none'
print(f"Starting subject {df['subject'].unique()}")
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
df = df_filter(df).reset_index()
if voxel_subset is not False:
if voxel_subset is True:
voxel_subset = df.voxel.unique()[:10]
elif isinstance(voxel_subset, int):
voxel_subset = df.voxel.unique()[:voxel_subset]
df = df.query("voxel in @voxel_subset")
data = []
peak_sfs = []
for n, g in df.groupby('voxel'):
data.append(_create_model_prediction_df(g, trained_model, n,
True))
g = g[['local_sf_xy_direction', 'eccen', 'angle']].values
peak_sf = trained_model.preferred_sf(*torch.tensor(g.T))
peak_sf = pd.DataFrame({'voxel': n, 'stimulus_class': np.arange(48),
'peak_sf': peak_sf.detach().numpy()})
peak_sfs.append(peak_sf)
data = | pd.concat(data) | pandas.concat |
import argparse
import glob
import itertools
import os
import random
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, kendalltau
def parse_argument() -> argparse.Namespace:
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_folder',
type=str,
default='./other_seeds',
help='Folder containing hypotheses files with different random seeds.',
)
args = parser.parse_args()
return args
def get_index(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = | pd.read_csv(filename, sep='\t', names=names) | pandas.read_csv |
import pandas as pd
import numpy as np
import tkinter as tk
from tkinter import filedialog
Response=pd.read_json("1.json",encoding="UTF-8")
carList=Response["response"]["classifieds"]
df=pd.DataFrame(carList)
for each in range(2,295):
try:
Response=pd.read_json(str(each)+".json",encoding="UTF-8")
carList=Response["response"]["classifieds"]
df2=pd.DataFrame(carList)
print(str(int((each/294)*100)))
df=np.concatenate((df, df2), axis=0)
df= | pd.DataFrame(df) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pa
# local_conn = mu.get_conn()
# local_conn = create_engine('mysql+pymysql://root:root@localhost:3306/test?charset=utf8')
# show all columns
pa.set_option('display.max_columns', None)
# show all rows
pa.set_option('display.max_rows', None)
path = r'C:\Users\AL\Desktop\test\text\text_data.csv'
text_df = pa.read_csv(path)
text_df.info()
text_df.head()
text_df.shape
text_df.count()
temp_data = text_df.groupby('user_id').apply(lambda t: t[t.buy_time == t.buy_time.max()])
temp_data.shape
text_df.info()
use_clo = ['send_text', 's_time', 's_u', 're_u']
temp_data = text_df[text_df['s_u'] == 47]
temp_data.size
text_df.groupby('text_t').size()
temp_data = text_df[use_clo][text_df['s_u'] == 47].head(5)
temp_data.head()
text_df[text_df['s_u'] == 47].head(5)
temp_data = text_df[((text_df['s_u'] == 47) & (text_df['re_u'] == 4003)) | (
(text_df['s_u'] == 4003) & (text_df['re_u'] == 47))]
temp_data = text_df[(text_df['s_u'] == 47) | (text_df['re_u'] == 4003)]
null_data = text_df[text_df['send_text'].isna()]
not_null_data = text_df[text_df['send_text'].notna()]
temp_data.groupby('text_t').size()
temp_data.groupby('re_u').size()
temp_data.groupby('text_t').count()
temp_data.groupby('text_t')['text_t'].count()
temp_data.groupby('text_t').agg({'s_time': np.mean, 'text_t': np.size})
# text_df.to_sql('text_data', con=local_conn, if_exists='replace')
df1 = pa.DataFrame({'key': ['A', 'B', 'C', 'D'],
'value': np.random.randn(4)})
df2 = pa.DataFrame({'key': ['B', 'D', 'D', 'E'],
'value': np.random.randn(4)})
pa.merge(df1, df2, on='key')
pa.concat([df1, df2])
pa.concat([df1, df2]).drop_duplicates()
# temp_data.nlargest(10 + 1, columns='re_u').tail(10)
path_random = r'C:\Users\AL\Desktop\test\test.csv'
test_data_df = pa.read_csv(path_random)
test_data_df.head()
# get duplicate rows
test_data_df[test_data_df.duplicated()]
np.sum(test_data_df.duplicated())
# drop duplicate rows
test_data_df.drop_duplicates(inplace=True)
test_data_df.isnull
test_data_df.isna
test_data_df.prod
np.sum(test_data_df.isnull(), axis=1)
test_data_df.apply(lambda x: sum(x.isnull()) / len(x), axis=1)
# drop rows with missing values
test_data_df.dropna(inplace=True)
# fill missing values with the column means
test_data_df.fillna(test_data_df.mean())
# fill missing values in different columns with different values
test_data_df.fillna(value={"name1": 123, "name2": test_data_df.name2.mean()})
# fill missing values with the previous value (forward fill)
test_data_df.fillna(method="ffill")
# outlier handling
s_mean = test_data_df['age'].mean()
s_std = test_data_df['age'].std()
s_mean + s_std * 2
s_mean - s_std * 2
test_data_df['age'] > s_mean + s_std * 2
test_data_df['age'] < s_mean - s_std * 2
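# added illustration: keep only the rows whose age lies inside the mean +/- 2*std band computed above
age_outlier_mask = (test_data_df['age'] > s_mean + s_std * 2) | (test_data_df['age'] < s_mean - s_std * 2)
test_data_df[~age_outlier_mask]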
test_data_df['age'].plot(kind="hist")
plt.show()
text_df.dtypes
text_df.head()
text_df.describe()
# delete a column
text_df = text_df.drop(['diff_date'], axis=1)
# text_df = text_df.drop(columns=['time_stamp'], axis=1)
# convert the unix timestamp to a datetime
text_df['s_time'] = pa.to_datetime(text_df['s_time'], unit='s')
# text_df['s_time'] = pa.to_timedelta(text_df['s_time'],unit='s')
# date format conversion
# method 1
# text_df['s_time'] = text_df['s_time'].apply(lambda x : x.strftime('%Y-%m-%d'))
# method 2: period aliases -- 'M' means month, 'Q' quarter, 'A' year, 'D' day; these are the most common ones.
text_df['test_time'] = text_df['s_time'].dt.to_period('D')
text_df['test_price'] = text_df['s_u'].astype(float)
text_df['diff_date'] = pa.datetime.today() - text_df['s_time']
text_df['diff_year'] = pa.datetime.today().year - text_df['s_time'].dt.year
# apply
text_df['total_price'] = text_df[['test_price', 're_u']].apply(np.prod, axis=1)
# groupby
text_df_group = text_df.groupby(by='test_time').count()
text_df_group = text_df.groupby(by='test_time').sum()
# take some columns
col_n = ['test_time', 'test_price', 'total_price']
temp_df = | pa.DataFrame(text_df, columns=col_n) | pandas.DataFrame |
#!/usr/bin/env python
import argparse
import logging
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from baselines.eval_task import construct_parser as eval_construct_parser
from baselines.eval_task import main as eval_main
def plot_log_file(log_file, trn_kwargs=None, vld_kwargs=None):
df = pd.read_csv(log_file)
trn_df = df.loc[df["mode"] == "train"]
min_trn_loss = trn_df["avg_loss"].min()
min_trn_acc = trn_df["avg_acc"].max()
vld_df = df.loc[df["mode"] == "test"]
min_vld_loss = vld_df["avg_loss"].min()
min_vld_acc = vld_df["avg_acc"].max()
if trn_kwargs is None:
trn_kwargs = dict(label=f"({min_trn_loss:.4f}, {min_trn_acc:.1f})")
elif "label" in trn_kwargs:
trn_kwargs["label"] += f" ({min_trn_loss:.4f}, {min_trn_acc:.1f})"
if vld_kwargs is None:
vld_kwargs = dict(label=f"({min_vld_loss:.4f}, {min_vld_acc:.1f})")
elif "label" in vld_kwargs:
vld_kwargs["label"] += f" ({min_vld_loss:.4f}, {min_vld_acc:.1f})"
plt.plot(trn_df["epoch"], trn_df["avg_loss"], **trn_kwargs)
plt.plot(vld_df["epoch"], vld_df["avg_loss"], **vld_kwargs)
plt.legend()
plt.xlabel("epoch")
plt.ylabel("average loss")
return trn_df, vld_df, (min_trn_loss, min_trn_acc, min_vld_loss, min_vld_acc)
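# Illustrative usage sketch (added; the path is hypothetical): the log is a csv
# with columns mode ('train'/'test'), epoch, avg_loss and avg_acc, e.g.
# trn_df, vld_df, losses = plot_log_file('output/task1/task1__expt_0.log')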
def plot_task_losses(
output_dir, task_name, settings, setting_names, save_plots=False, show_plots=True
):
idx_cols = ["task_name", "expt_name"] + setting_names + ["repeat"]
val_cols = ["min_trn_loss", "min_vld_loss", "min_trn_acc", "min_vld_acc"]
res = pd.DataFrame(columns=idx_cols + val_cols, dtype=float)
res.set_index(idx_cols, drop=True, inplace=True)
summary_fig = plt.figure()
plt.title(f"{task_name} vld loss curves")
alphas = [0.3, 0.5, 0.7]
for setting in settings:
setting_fig = plt.figure()
# TODO: this shouldn't be hardcoded 3
for repeat in range(3):
expt_name = f'{task_name}__{"_".join(setting)}_{repeat}'
log_file = f"{output_dir}/{task_name}/{expt_name}.log"
plt.figure(setting_fig.number)
try:
trn_df, vld_df, losses = plot_log_file(
log_file,
trn_kwargs=dict(
label=f"train {repeat}", color="C0", alpha=alphas[repeat]
),
vld_kwargs=dict(
label=f"valid {repeat}", color="C1", alpha=alphas[repeat]
),
)
except pd.errors.EmptyDataError:
print(f"{expt_name}.log found but empty")
continue
except FileNotFoundError:
print(f"{expt_name}.log not found")
continue
min_trn_loss, min_trn_acc, min_vld_loss, min_vld_acc = losses
idx = (task_name, expt_name) + setting + (str(repeat),)
res.loc[idx, :] = [min_trn_loss, min_vld_loss, min_trn_acc, min_vld_acc]
plt.figure(summary_fig.number)
plt.plot(vld_df["epoch"], vld_df["avg_loss"], color="C1", alpha=0.2)
plt.figure(setting_fig.number)
if save_plots:
plt.savefig(f'{save_plots}/{task_name}__{"_".join(setting)}.png', dpi=300)
plt.savefig(f'{save_plots}/{task_name}__{"_".join(setting)}.pdf', dpi=300)
plt.title(f'{task_name}__{"_".join(setting)}')
if show_plots:
plt.show()
plt.close()
plt.figure(summary_fig.number)
if save_plots:
plt.savefig(f"{save_plots}/{task_name}__all_loss_summary.png", dpi=300)
plt.savefig(f"{save_plots}/{task_name}__all_loss_summary.pdf", dpi=300)
if show_plots:
plt.show()
plt.close()
return res
def get_settings(output_dir, task_name):
files = glob(f"{output_dir}/{task_name}/*")
settings = [tuple(ff.split("__")[-1].rsplit("_", 1)[0].split("_")) for ff in files]
settings = set(settings) # easy dedup
settings = list(settings)
settings.sort()
return settings
def round_to_n(x, n=3):
if pd.isnull(x):
return np.nan
elif x == 0:
return 0
else:
return np.round(x, -(np.floor(np.log10(x))).astype(int) + (n - 1))
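# Worked examples (added for clarity): round_to_n keeps n significant figures,
# e.g. round_to_n(0.012345) -> 0.0123 and round_to_n(12345, n=2) -> 12000;
# NaN and 0 are passed through unchanged.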
def plot_confusion(confusion_mat, save_plots=False, ax=None):
if ax is None:
ax = plt.gca()
degs = [
"none",
"pitch_shift",
"time_shift",
"onset_shift",
"offset_shift",
"remove_note",
"add_note",
"split_note",
"join_notes",
]
_ = ax.imshow(confusion_mat, cmap="Oranges", interpolation="nearest")
# We want to show all ticks...
ax.xaxis.tick_top()
ax.set_xticks(np.arange(len(degs)))
ax.set_yticks(np.arange(len(degs)))
# ... and label them with the respective list entries
ax.set_xticklabels(degs, fontname="serif")
ax.set_yticklabels(degs, fontname="serif")
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="left", rotation_mode="anchor")
for i in range(len(degs)):
array_range = np.max(confusion_mat) - np.min(confusion_mat)
color_cutoff = np.min(confusion_mat) + array_range / 2
for j in range(len(degs)):
_ = ax.text(
j,
i,
"%.2f" % confusion_mat[i, j],
ha="center",
va="center",
color="black" if confusion_mat[i, j] < color_cutoff else "white",
fontname="serif",
)
plt.ylim(8.5, -0.5)
plt.tight_layout()
if save_plots:
plt.savefig(f"{save_plots}.png", dpi=300)
plt.savefig(f"{save_plots}.pdf", dpi=300)
def plot_1d_array_per_deg(array, save_plots=False, ax=None):
if ax is None:
ax = plt.gca()
degs = [
"none",
"pitch_shift",
"time_shift",
"onset_shift",
"offset_shift",
"remove_note",
"add_note",
"split_note",
"join_notes",
]
_ = ax.imshow(array.reshape((-1, 1)), cmap="Oranges", interpolation="nearest",)
# We want to show all ticks...
ax.xaxis.tick_top()
ax.set_yticks(np.arange(len(degs)))
# ... and label them with the respective list entries
ax.set_yticklabels(degs, fontname="serif")
for ii in range(len(degs)):
array_range = np.max(array) - np.min(array)
color_cutoff = np.min(array) + array_range / 2
if array[ii] < color_cutoff:
color = "black"
else:
color = "white"
_ = ax.text(
0,
ii,
f"{array[ii]:.2f}",
ha="center",
va="center",
color=color,
fontname="serif",
)
plt.ylim(8.5, -0.5)
plt.tight_layout()
if save_plots:
plt.savefig(f"{save_plots}.png", dpi=300)
plt.savefig(f"{save_plots}.pdf", dpi=300)
def construct_parser():
parser = argparse.ArgumentParser(
description="Script for summarising "
"results of experiments run. Expects a "
"directory of output data, with "
"subdirectories for the name of the task."
" These subdirs contain the logs and "
"checkpoints for models fitted."
)
parser.add_argument(
"--output_dir", default="output", help="location of logs and model checkpoints"
)
parser.add_argument(
"--save_plots",
default=None,
help="location to save plots. By default will "
"save to output_dir. If set to none no plots "
"saved",
)
parser.add_argument(
"--in_dir",
default="acme",
help="location of the pianoroll and command corpus datasets",
)
parser.add_argument(
"--task_names",
nargs="+",
required=True,
help="names of tasks to get results for. "
"must correspond to names of dirs in output_dir",
)
parser.add_argument(
"--setting_names",
nargs="+",
required=True,
help="A list (with no spaces) describing the names of "
"variables the gridsearches were performed over "
"for each task e.g. for --task_names task1 task4 "
"--setting_names \"['lr','wd','hid']\" "
"\"['lr','wd','hid','lay']\". You need to be careful "
"to preserve quotes",
)
parser.add_argument(
"--formats",
nargs="+",
required=True,
choices=["pianoroll", "command"],
help="data format type for each task e.g. "
"for --task_names task1 task4 --formats command "
"pianoroll",
)
parser.add_argument(
"--seq_len", nargs="+", required=True, help="seq_len for each task"
)
parser.add_argument(
"--metrics", nargs="+", required=True, help="metric for each task"
)
parser.add_argument(
"--task_desc",
nargs="+",
required=True,
help="description (with no spaces) for each task to "
"use as the identifier in the results table e.g. for "
"--task_names task1 task4 --task_desc ErrorDetection "
"ErrorCorrection",
)
parser.add_argument(
"--splits",
nargs="+",
required=True,
default=["train", "valid", "test"],
help="which splits to evaluate: train, valid, test.",
)
parser.add_argument(
"-s",
"--show_plots",
action="store_true",
help="Whether to use plt.show() or not",
)
return parser
def main(args):
task_names = args.task_names
nr_tasks = len(task_names)
# TODO: change the way this is done! ATM can't think of
# better way of handling sending a list of lists
setting_names = [eval(ll) for ll in args.setting_names]
assert len(setting_names) == nr_tasks, (
"You must submit a list of "
"parameters being searched for each task --setting_names . "
"Submit lists of parameter names with no spaces, e.g. "
"--task_names task1 task4 "
"--setting_names ['lr','wd','hid'] ['lr','wd','hid','lay'] ."
f"You submitted {nr_tasks} tasks: {task_names}, but "
f"{len(setting_names)} setting names: {setting_names}"
)
for varname in ["formats", "seq_len", "metrics", "task_desc"]:
value = getattr(args, varname)
vlen = len(value)
assert vlen == nr_tasks, (
f"You submitted {vlen} {varname}, but need "
f"to supply {nr_tasks}. task_names: {task_names}, "
f"{varname}: {value}"
)
in_dir = args.in_dir
output_dir = args.output_dir
save_plots = args.save_plots
if save_plots is None:
save_plots = output_dir
    elif save_plots.lower() == "none":
save_plots = None
formats = dict(zip(task_names, args.formats))
seq_len = dict(zip(task_names, args.seq_len))
metrics = dict(zip(task_names, args.metrics))
task_desc = dict(zip(task_names, args.task_desc))
splits = args.splits
results = {}
mean_res = {}
median_res = {}
min_idx = {}
median_min_idx = {}
for task_name, setting_name in zip(task_names, setting_names):
print(f'{task_name} plots {20*"="}')
settings = get_settings(output_dir, task_name)
results[task_name] = plot_task_losses(
output_dir,
task_name,
settings,
setting_name,
save_plots=f"{save_plots}",
show_plots=args.show_plots,
)
for task_name, setting_name in zip(task_names, setting_names):
res = results[task_name]
if len(res) == 0:
print(f"No results for {task_name}")
continue
mean_res[task_name] = res.mean(level=setting_name)
median_res[task_name] = res.median(level=setting_name)
min_idx[task_name] = res.min_vld_loss.idxmin()
median_min_idx[task_name] = res.min_vld_loss.idxmin()
df = results[task_name].reset_index()
df["expt_id"] = (
df[setting_name]
.apply(lambda x: x.astype(str), axis=1)
.apply("_".join, axis=1)
)
df.sort_values("expt_id", inplace=True)
plt.figure()
sns.pointplot(
x="expt_id",
y="min_vld_loss",
estimator=np.median,
ci="sd",
data=df,
linewidth=0,
)
plt.xticks(rotation=90)
plt.title(f"Summary of {task_name} - median over repeats")
if save_plots:
plt.savefig(f"{save_plots}/{task_name}__min_loss_summary.pdf", dpi=300)
plt.savefig(f"{save_plots}/{task_name}__min_loss_summary.png", dpi=300)
if args.show_plots:
plt.show()
plt.close("all")
# sns.pointplot(x='expt_id', y='min_vld_loss', estimator=np.mean,
# ci='sd', data=df, linewidth=0)
# plt.xticks(rotation=90)
# plt.title(f'Summary of {task_name} - mean over repeats')
# plt.show()
best_models = {
task: f"{output_dir}/{task}/{val[1]}.checkpoint.best"
for task, val in min_idx.items()
}
best_logs = {
task: f"{output_dir}/{task}/{val[1]}.log" for task, val in min_idx.items()
}
print(f"best models: {best_models}")
for task_name, log_file in best_logs.items():
plot_log_file(log_file)
plt.title(f"{task_name} best model training curve")
if save_plots:
plt.savefig(f"{save_plots}/{task_name}__best_model_loss.png", dpi=300)
plt.savefig(f"{save_plots}/{task_name}__best_model_loss.pdf", dpi=300)
if args.show_plots:
plt.show()
plt.close("all")
task_eval_log = {}
for task_name in task_names:
eval_parser = eval_construct_parser()
eval_args_str = (
f"--input {in_dir} "
f"--model {best_models[task_name]} "
f"--format {formats[task_name]} "
f"--task {task_name[4]} "
f"--seq_len {seq_len[task_name]} "
f"--splits {' '.join(splits)}"
# f"--splits test"
)
eval_args = eval_parser.parse_args(eval_args_str.split())
logging.disable(logging.WARNING)
log_info = eval_main(eval_args)
logging.disable(logging.NOTSET)
task_eval_log[task_name] = log_info
dict_of_df = {k: | pd.DataFrame(v) | pandas.DataFrame |
##############################################################################
# Usage: python extract_QCT.py Proj_path Demo_path Proj
# ex) python extract_QCT.py
# data/sample_Proj/Proj_Subj
# data/sample_demo.csv
# ENV18PM
#
# Run Time: ~1 min
# Ref: ENV18PM.drawio
# ##############################################################################
# extract_QCT.py (No version suffix): 20220118, In Kyu Lee
# Use git to maintain different versions.
# ##############################################################################
# v2h: 20211031, In Kyu Lee
# - Minor error fixed: Airtrap -> AirT.
# v2g: 20211031, <NAME>
# - AirT is added.
# v2f: 20211020, <NAME>
# - TF.average instead of TF.total
# v2d: 20211015, In Kyu Lee
# - CFG class is defined to get config.
# v2c: 20211015, <NAME>, In Kyu Lee
# - changed ("string") to (str) for number only subject ID (Subj) for version independency.
# -
# v2b: 20211008b, In Kyu Lee
# - Changed FU to T0
# v2a: 20211007a, <NAME>
# - corrected WT_pred & Dh_pred for KOR.
# - made minor edits in comments for hard-coded parameters.
# v2: 20211007, In Kyu Lee
# - Proj is the third argument
# - Korean predicted value is available
# - Manual work is minimized
# ##############################################################################
# v1: 20210501, In Kyu Lee
# Desc: Extract QCT variables
# ##############################################################################
# Input:
# - Path of the project folder, ex) /data4/common/IR/IR_ENV18PM_SN12
# - Path of the demographics csv file, ex) /data1/common/IR/ENV18PM_Demo_20210304.csv
# - Proj, ex) ENV18PM
# Output:
# - QCT varialbes csv file for each subject
# - combined QCT variables csv file
# ##############################################################################
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import os
import sys
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
class CFG:
path = str(sys.argv[1])
demo_path = str(sys.argv[2])
Proj = str(sys.argv[3])
# --------------------
# Proj = 'ENV18PM'
# Proj = 'CBDPI'
# Proj = "SARP"
# Img0: Fixed Image
# Img1: Floating Image
Img0 = "IN0"
Img1 = "EX0"
# Img0 = 'IND0'
# Img0 = 'TLC0'
# Img1 = 'FRC0'
# FU = '{FU}' # 20211008IK
FU = "T0"
# For Korean, set True
KOR = False
# --------------------
def Circularity(branch):
if branch.empty:
return np.nan
A = branch.avgInnerArea.values[0]
P = branch.avgInnerPerimeter.values[0]
return (4 * np.pi * A) / P ** 2
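# Added note: circularity equals 1 for a perfectly circular lumen
# (A = pi*r^2, P = 2*pi*r gives 4*pi*A/P^2 = 1) and is < 1 for any other shape.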
def Eccentricity(branch):
if branch.empty:
return np.nan
minor_inner_D = branch.avgMinorInnerDiam.values[0]
major_inner_D = branch.avgMajorInnerDiam.values[0]
return minor_inner_D/major_inner_D
def LADIVBYCL(branch):
if branch.empty:
return np.nan
inner_A = branch.avgInnerArea.values[0]
center_line_l = branch.centerLineLength.values[0]
return inner_A/center_line_l
def D_inner(branch):
if branch.empty:
return np.nan, np.nan
minor_inner_D = branch.avgMinorInnerDiam.values[0]
major_inner_D = branch.avgMajorInnerDiam.values[0]
return minor_inner_D, major_inner_D
def Area_lumen(branch):
if branch.empty:
return np.nan, np.nan
inner_A = branch.avgInnerArea.values[0]
outer_A = branch.avgOuterArea.values[0]
return inner_A, outer_A
def Peri_lumen(branch):
if branch.empty:
return np.nan, np.nan
inner_P = branch.avgInnerPerimeter.values[0]
outer_P = branch.avgOuterPerimeter.values[0]
return inner_P, outer_P
# WALLAREA
# WALLAREAPCT
def Area_wall(branch):
if branch.empty:
return np.nan, np.nan
wall_frac = branch.avgWallAreaFraction.values[0]
inner_A = branch.avgInnerArea.values[0]
outer_A = branch.avgOuterArea.values[0]
wall_A = outer_A - inner_A
wall_A_p = wall_frac *100
return wall_A, wall_A_p
# avgavgwallthickness
# WALLTHICKNESSPCT
# avgAvgWallThickness
# avgAvgWallThickness/(avgInnerEquivalentCircleDiameter + avgAvgWallThickness)*100%
def Thickness_wall(branch):
if branch.empty:
return np.nan, np.nan
wall_th = branch.avgAvgWallThickness.values[0]
inner_CD = branch.avgInnerEquivalentCircleDiameter.values[0]
wall_th_p = 100 * wall_th / (inner_CD + wall_th)
return wall_th, wall_th_p
def WT_pred(row, KOR=False):
if KOR:
return np.log10(
9.11
- 1.02 * np.log10(row["Age_yr"])
- 0.98 * row["Height_m"] * row["Height_m"] * row["Gender_m0f1"]
+ 1.01
* row["Height_m"]
* row["Height_m"]
* np.log10(row["Age_yr"]) # 20211007jc
# + 1.01*row["Height_yr"]*row["Height_yr"]*row["Age_yr"]
)
else:
return (
4.5493
- 0.5007 * row["Gender_m0f1"]
+ 0.3007 * np.log10(row["Age_yr"]) * row["Height_m"]
)
def WT_norm(branch, row, KOR=False):
if branch.empty or row.empty:
return np.nan
WT = branch.avgAvgWallThickness.values[0]
WT_predicted = WT_pred(row, KOR)
return WT / WT_predicted
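# Added note: `row` must provide the demographics used by WT_pred/Dh_pred
# (Age_yr, Gender_m0f1, Height_m, as read from the demo csv); e.g.
# WT_norm(Trachea, row) gives measured wall thickness over the predicted value.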
# Ref: [QCT-based structural Alterations of Asthma]
def Dh_pred(row, KOR=False):
if KOR:
return (
12.79
- 0.13 * np.log10(row["Age_yr"])
- 5.82 * np.log10(row["Height_m"]) * row["Gender_m0f1"] # 20211007jc
# - 5.82 * np.log10(row["Height_yr"]) * row["Gender_m0f1"]
+ 3.01 * np.log10(row["Age_yr"]) * row["Height_m"]
)
else:
return (
16.446
- 2.4019 * row["Gender_m0f1"]
- 0.298809 * row["Gender_m0f1"] * row["Age_yr"]
+ 0.0284836 * row["Age_yr"] * row["Height_m"]
+ 0.1786604 * row["Gender_m0f1"] * row["Age_yr"] * row["Height_m"]
)
def Dh_norm(branch, row, KOR=False):
if branch.empty or row.empty:
return np.nan
A = branch.avgInnerArea.values[0]
P = branch.avgInnerPerimeter.values[0]
Dh = 4 * A / P
Dh_predicted = Dh_pred(row, KOR)
return Dh / Dh_predicted
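# Added note: Dh = 4A/P is the hydraulic diameter; for a circular lumen it
# reduces to the ordinary diameter (4*pi*r^2 / (2*pi*r) = 2r).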
# Angle between two vectors: arccos(np.dot(v1,v2))
def Angle_vectors(v1, v2):
return np.arccos(np.dot(v1, v2)) * (180 / np.pi)
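# Added note: this assumes v1 and v2 are unit vectors (direction cosines), e.g.
# Angle_vectors(np.array([1, 0, 0]), np.array([0, 1, 0])) returns 90.0 degrees.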
def main():
Config = CFG()
path = Config.path
demo_path = Config.demo_path
Proj = Config.Proj
Img0 = Config.Img0
Img1 = Config.Img1
FU = Config.FU
KOR = Config.KOR
Subjs = [
f.split("_")[1]
for f in os.listdir(path)
if os.path.isdir(os.path.join(path, f)) and f.split("_")[0] == Proj
]
if os.path.exists(demo_path):
demo_available = True
demo_df = pd.read_csv(demo_path)
demo_df["Subj"] = demo_df["Subj"].astype(str)
else:
demo_available = False
print(f"{demo_path} cant be found.")
print("Extracting QCTs without demo.")
pbar = tqdm(enumerate(Subjs),total=len(Subjs))
for i, Subj in pbar:
subj_path = os.path.join(path, f"{Proj}_{Subj}")
df = pd.DataFrame({"Proj": [Proj], "Subj": [Subj]})
# variable_ : path of the variable
# lobe0: lu | lobe1: ll | lobe2: ru | lobe3: rm | lobe4: rl
# Demographics
if demo_available:
row = demo_df[demo_df.Subj == Subj].reset_index(drop=True)
if len(row) > 0:
row = row.loc[0, :]
df["Age_yr"] = row.Age_yr
df["Gender_m0f1"] = row.Gender_m0f1
df["Height_m"] = row.Height_m
df["Weight_kg"] = row.Weight_kg
else:
df["Age_yr"] = "na"
df["Gender_m0f1"] = "na"
df["Height_m"] = "na"
df["Weight_kg"] = "na"
# Vent
Vent_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_airDiff_Lobe.dat"
)
if os.path.exists(Vent_):
Vent = pd.read_csv(Vent_, sep=" ")
# Upper/(Middle+Lower)
df[f"dAV_U_ML_{FU}"] = (Vent.total[0] + Vent.total[2]) / (
Vent.total[1] + Vent.total[3] + Vent.total[4]
)
# (Upper+Middle)/Lower
df[f"dAV_UM_L_{FU}"] = (Vent.total[0] + Vent.total[2] + Vent.total[3]) / (
Vent.total[1] + Vent.total[4]
)
df[f"dAV_xLUL_{FU}"] = Vent.total[0] / Vent.total[5]
df[f"dAV_xLLL_{FU}"] = Vent.total[1] / Vent.total[5]
df[f"dAV_xRUL_{FU}"] = Vent.total[2] / Vent.total[5]
df[f"dAV_xRML_{FU}"] = Vent.total[3] / Vent.total[5]
df[f"dAV_xRLL_{FU}"] = Vent.total[4] / Vent.total[5]
# Tissue fraction @ TLC
TLC_tiss_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_fixed_tissue_Lobe.dat"
)
if os.path.exists(TLC_tiss_):
TLC_tiss = pd.read_csv(TLC_tiss_, sep=" ")
df[f"TF_All_{Img0}"] = TLC_tiss.average[5]
df[f"TF_LUL_{Img0}"] = TLC_tiss.average[0]
df[f"TF_LLL_{Img0}"] = TLC_tiss.average[1]
df[f"TF_RUL_{Img0}"] = TLC_tiss.average[2]
df[f"TF_RML_{Img0}"] = TLC_tiss.average[3]
df[f"TF_RLL_{Img0}"] = TLC_tiss.average[4]
# Emphysema & fSAD
# Emph_ = os.path.join(subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_Emphy.txt")
Emph_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_Emph_fSAD.txt"
)
if not os.path.exists(Emph_):
Emph_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_Emphys.txt"
)
if os.path.exists(Emph_):
Emph = pd.read_csv(Emph_, sep=" ")
df[f"Emph_All_{FU}"] = Emph.Emphysratio[5]
df[f"Emph_LUL_{FU}"] = Emph.Emphysratio[0]
df[f"Emph_LLL_{FU}"] = Emph.Emphysratio[1]
df[f"Emph_RUL_{FU}"] = Emph.Emphysratio[2]
df[f"Emph_RML_{FU}"] = Emph.Emphysratio[3]
df[f"Emph_RLL_{FU}"] = Emph.Emphysratio[4]
df[f"fSAD_All_{FU}"] = Emph.fSADratio[5]
df[f"fSAD_LUL_{FU}"] = Emph.fSADratio[0]
df[f"fSAD_LLL_{FU}"] = Emph.fSADratio[1]
df[f"fSAD_RUL_{FU}"] = Emph.fSADratio[2]
df[f"fSAD_RML_{FU}"] = Emph.fSADratio[3]
df[f"fSAD_RLL_{FU}"] = Emph.fSADratio[4]
# Airtrap
# AirT_ = os.path.join(subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_Airtrap.txt")
AirT_ = os.path.join(
# subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_Airtrap.txt"
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_AirT.txt" #20211031 IKL
)
if os.path.exists(AirT_):
AirT = pd.read_csv(AirT_, sep=" ")
df[f"AirT_All_{FU}"] = AirT.airtrapratio[5]
df[f"AirT_LUL_{FU}"] = AirT.airtrapratio[0]
df[f"AirT_LLL_{FU}"] = AirT.airtrapratio[1]
df[f"AirT_RUL_{FU}"] = AirT.airtrapratio[2]
df[f"AirT_RML_{FU}"] = AirT.airtrapratio[3]
df[f"AirT_RLL_{FU}"] = AirT.airtrapratio[4]
# RRAVC
RRAVC_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_RRAVC.txt"
)
if os.path.exists(RRAVC_):
RRAVC = pd.read_csv(RRAVC_, sep=" ")
df[f"RRAVC_All_{FU}"] = RRAVC.RRAVC_m[5]
df[f"RRAVC_LUL_{FU}"] = RRAVC.RRAVC_m[0]
df[f"RRAVC_LLL_{FU}"] = RRAVC.RRAVC_m[1]
df[f"RRAVC_RUL_{FU}"] = RRAVC.RRAVC_m[2]
df[f"RRAVC_RML_{FU}"] = RRAVC.RRAVC_m[3]
df[f"RRAVC_RLL_{FU}"] = RRAVC.RRAVC_m[4]
# s_norm
s_norm_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_s_norm.txt"
)
if os.path.exists(s_norm_):
s_norm = pd.read_csv(s_norm_, sep=" ")
df[f"sStar_All_{FU}"] = s_norm.sStar_m[5]
df[f"sStar_LUL_{FU}"] = s_norm.sStar_m[0]
df[f"sStar_LLL_{FU}"] = s_norm.sStar_m[1]
df[f"sStar_RUL_{FU}"] = s_norm.sStar_m[2]
df[f"sStar_RML_{FU}"] = s_norm.sStar_m[3]
df[f"sStar_RLL_{FU}"] = s_norm.sStar_m[4]
# HAA
HAA_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_lobar_HAA-700to0.txt"
)
if os.path.exists(HAA_):
HAA = pd.read_csv(HAA_, sep=" ")
df[f"HAA_All_{FU}"] = HAA.HAAratio[5]
df[f"HAA_LUL_{FU}"] = HAA.HAAratio[0]
df[f"HAA_LLL_{FU}"] = HAA.HAAratio[1]
df[f"HAA_RUL_{FU}"] = HAA.HAAratio[2]
df[f"HAA_RML_{FU}"] = HAA.HAAratio[3]
df[f"HAA_RLL_{FU}"] = HAA.HAAratio[4]
# Jacob
J_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_jacob_Lobe.dat"
)
if os.path.exists(J_):
J = pd.read_csv(J_, sep=" ")
df[f"J_All_{FU}"] = J.average[5]
df[f"J_LUL_{FU}"] = J.average[0]
df[f"J_LLL_{FU}"] = J.average[1]
df[f"J_RUL_{FU}"] = J.average[2]
df[f"J_RML_{FU}"] = J.average[3]
df[f"J_RLL_{FU}"] = J.average[4]
# ADI
ADI_ = os.path.join(
subj_path, f"{Subj}_{Img1}-TO-{Subj}_{Img0}-SSTVD_ADI_Lobe.dat"
)
if os.path.exists(ADI_):
ADI = pd.read_csv(ADI_, sep=" ")
df[f"ADI_All_{FU}"] = ADI.average[5]
df[f"ADI_LUL_{FU}"] = ADI.average[0]
df[f"ADI_LLL_{FU}"] = ADI.average[1]
df[f"ADI_RUL_{FU}"] = ADI.average[2]
df[f"ADI_RML_{FU}"] = ADI.average[3]
df[f"ADI_RLL_{FU}"] = ADI.average[4]
# pi10
pi10_ = os.path.join(subj_path, f"{Subj}_{Img0}_vida-Pi10.csv")
if os.path.exists(pi10_):
pi10 = pd.read_csv(pi10_)
df[f"whole_tree_all_INSP"] = pi10['pi10_whole_tree_all'].values[0]
df[f"whole_tree_leq20_INSP"] = pi10['pi10_whole_tree_leq20'].values[0]
# histo
histo_ = os.path.join(subj_path, f"{Subj}_{Img0}_vida-histo.csv")
if os.path.exists(histo_):
histo = pd.read_csv(histo_)
Lung = histo[histo.location == "both"]
df[f"both_mean_hu_INSP"] = Lung['mean'].values[0]
df[f"both_pct_be_950_INSP"] = Lung['percent-below_-950'].values[0]
df[f"both_tissue_volume_cm3_INSP"] = Lung['tissue-volume-cm3'].values[0]
df[f"both_air_volume_cm3_INSP"] = Lung['air-volume-cm3'].values[0]
df[f"both_total_volume_cm3_INSP"] = Lung['total-volume-cm3'].values[0]
# Air meas
air_meas_ = os.path.join(subj_path, f"{Subj}_{Img0}_vida-airmeas.csv")
if os.path.exists(air_meas_):
air_meas = pd.read_csv(air_meas_)
Trachea = air_meas[air_meas.anatomicalName == "Trachea"]
RMB = air_meas[air_meas.anatomicalName == "RMB"]
LMB = air_meas[air_meas.anatomicalName == "LMB"]
LLB = air_meas[air_meas.anatomicalName == "LLB"]
BronInt = air_meas[air_meas.anatomicalName == "BronInt"]
LUL = air_meas[air_meas.anatomicalName == "LUL"]
RUL = air_meas[air_meas.anatomicalName == "RUL"]
RLL = air_meas[air_meas.anatomicalName == "RLL"]
# segmental branches
# Left Upper Lobe (LUL)
LB1 = air_meas[air_meas.anatomicalName == "LB1"]
LB2 = air_meas[air_meas.anatomicalName == "LB2"]
LB3 = air_meas[air_meas.anatomicalName == "LB3"]
LB4 = air_meas[air_meas.anatomicalName == "LB4"]
LB5 = air_meas[air_meas.anatomicalName == "LB5"]
# Left Lower Lobe (LLL)
LB6 = air_meas[air_meas.anatomicalName == "LB6"]
LB8 = air_meas[air_meas.anatomicalName == "LB8"]
LB9 = air_meas[air_meas.anatomicalName == "LB9"]
LB10 = air_meas[air_meas.anatomicalName == "LB10"]
# Right Upper Lobe (RUL)
RB1 = air_meas[air_meas.anatomicalName == "RB1"]
RB2 = air_meas[air_meas.anatomicalName == "RB2"]
RB3 = air_meas[air_meas.anatomicalName == "RB3"]
# Right Middle Lobe (RML)
RB4 = air_meas[air_meas.anatomicalName == "RB4"]
RB5 = air_meas[air_meas.anatomicalName == "RB5"]
# Right Lower Lobe (RLL)
RB6 = air_meas[air_meas.anatomicalName == "RB6"]
RB7 = air_meas[air_meas.anatomicalName == "RB7"]
RB8 = air_meas[air_meas.anatomicalName == "RB8"]
RB9 = air_meas[air_meas.anatomicalName == "RB9"]
RB10 = air_meas[air_meas.anatomicalName == "RB10"]
# Angle_Trachea
RMB_vector = np.array(
(RMB.dirCosX.values[0], RMB.dirCosY.values[0], RMB.dirCosZ.values[0])
)
LMB_vector = np.array(
(LMB.dirCosX.values[0], LMB.dirCosY.values[0], LMB.dirCosZ.values[0])
)
RUL_vector = np.array(
(RUL.dirCosX.values[0], RUL.dirCosY.values[0], RUL.dirCosZ.values[0])
)
BronInt_vector = np.array(
(
BronInt.dirCosX.values[0],
BronInt.dirCosY.values[0],
BronInt.dirCosZ.values[0],
)
)
df[f"Angle_eTrachea_{Img0}"] = Angle_vectors(RMB_vector, LMB_vector)
# Angle between RUL and BronInt:
df[f"Angle_eRMB_{Img0}"] = Angle_vectors(RUL_vector, BronInt_vector)
# Circularity: C = 4*pi*A/P^2; P: perimeter
df[f"Cr_Trachea_{Img0}"] = Circularity(Trachea)
df[f"Cr_RMB_{Img0}"] = Circularity(RMB)
df[f"Cr_LMB_{Img0}"] = Circularity(LMB)
df[f"Cr_LLB_{Img0}"] = Circularity(LLB)
df[f"Cr_BI_{Img0}"] = Circularity(BronInt)
cr_LB1 = Circularity(LB1)
cr_LB2 = Circularity(LB2)
cr_LB3 = Circularity(LB3)
cr_LB4 = Circularity(LB4)
cr_LB5 = Circularity(LB5)
cr_LB6 = Circularity(LB6)
cr_LB8 = Circularity(LB8)
cr_LB9 = Circularity(LB9)
cr_LB10 = Circularity(LB10)
cr_RB1 = Circularity(RB1)
cr_RB2 = Circularity(RB2)
cr_RB3 = Circularity(RB3)
cr_RB4 = Circularity(RB4)
cr_RB5 = Circularity(RB5)
cr_RB6 = Circularity(RB6)
cr_RB7 = Circularity(RB7)
cr_RB8 = Circularity(RB8)
cr_RB9 = Circularity(RB9)
cr_RB10 = Circularity(RB10)
df[f"Cr_sLUL_{Img0}"] = np.mean([cr_LB1,cr_LB2,cr_LB3,cr_LB4,cr_LB5])
df[f"Cr_sLLL_{Img0}"] = np.mean([cr_LB6,cr_LB8,cr_LB9,cr_LB10])
df[f"Cr_sRUL_{Img0}"] = np.mean([cr_RB1,cr_RB2,cr_RB3])
df[f"Cr_sRML_{Img0}"] = np.mean([cr_RB4,cr_RB5])
df[f"Cr_sRLL_{Img0}"] = np.mean([cr_RB6,cr_RB7,cr_RB8,cr_RB9,cr_RB10])
# Eccentircity
ecc_lb1 = Eccentricity(LB1)
ecc_lb10 = Eccentricity(LB10)
ecc_rb1 = Eccentricity(RB1)
ecc_rb4 = Eccentricity(RB4)
ecc_rb10 = Eccentricity(RB10)
df[f"ECCENTRICITY"] = (ecc_lb1 + ecc_lb10 + ecc_rb1 + ecc_rb4 + ecc_rb10)/5
# LADIVBYCL
l_cl_lb1 = LADIVBYCL(LB1)
l_cl_lb10 = LADIVBYCL(LB10)
l_cl_rb1 = LADIVBYCL(RB1)
l_cl_rb4 = LADIVBYCL(RB4)
l_cl_rb10 = LADIVBYCL(RB10)
df[f"LADIVBYCL"] = (l_cl_lb1 + l_cl_lb10 + l_cl_rb1 + l_cl_rb4 + l_cl_rb10)/5
# avgminorinnerdiam, avgmajorinnerdiam
minor_in_d_lb1, major_in_d_lb1 = D_inner(LB1)
minor_in_d_lb10, major_in_d_lb10 = D_inner(LB10)
minor_in_d_rb1, major_in_d_rb1 = D_inner(RB1)
minor_in_d_rb4, major_in_d_rb4 = D_inner(RB4)
minor_in_d_rb10, major_in_d_rb10 = D_inner(RB10)
df[f"Dminor"] = (minor_in_d_lb1 + minor_in_d_lb10 + minor_in_d_rb1 + minor_in_d_rb4 + minor_in_d_rb10)/5
df[f"Dmajor"] = (major_in_d_lb1 + major_in_d_lb10 + major_in_d_rb1 + major_in_d_rb4 + major_in_d_rb10)/5
# avginnerarea, avgouterarea
in_A_lb1, out_A_lb1 = Area_lumen(LB1)
in_A_lb10, out_A_lb10 = Area_lumen(LB10)
in_A_rb1, out_A_rb1 = Area_lumen(RB1)
in_A_rb4, out_A_rb4 = Area_lumen(RB4)
in_A_rb10, out_A_rb10 = Area_lumen(RB10)
LA = (in_A_lb1 + in_A_lb10 + in_A_rb1 + in_A_rb4 + in_A_rb10)/5
df[f"LA"] = LA
OA = (out_A_lb1 + out_A_lb10 + out_A_rb1 + out_A_rb4 + out_A_rb10)/5
df[f"OA"] = OA
df[f"Dout"] = np.sqrt((4*OA)/np.pi)
# avginnerperimeter, avgouterperimeter
in_P_lb1, out_P_lb1 = Peri_lumen(LB1)
in_P_lb10, out_P_lb10 = Peri_lumen(LB10)
in_P_rb1, out_P_rb1 = Peri_lumen(RB1)
in_P_rb4, out_P_rb4 = Peri_lumen(RB4)
in_P_rb10, out_P_rb10 = Peri_lumen(RB10)
df[f"Peri"] = (in_P_lb1 + in_P_lb10 + in_P_rb1 + in_P_rb4 + in_P_rb10)/5
df[f"Peri_o"] = (out_P_lb1 + out_P_lb10 + out_P_rb1 + out_P_rb4 + out_P_rb10)/5
# WALLAREA, WALLAREAPCT
wall_A_lb1, wall_A_p_lb1 = Area_wall(LB1)
wall_A_lb10, wall_A_p_lb10 = Area_wall(LB10)
wall_A_rb1, wall_A_p_rb1 = Area_wall(RB1)
wall_A_rb4, wall_A_p_rb4 = Area_wall(RB4)
wall_A_rb10, wall_A_p_rb10 = Area_wall(RB10)
df[f"WA"] = (wall_A_lb1 + wall_A_lb10 + wall_A_rb1 + wall_A_rb4 + wall_A_rb10)/5
df[f"WA_pct"] = (wall_A_p_lb1 + wall_A_p_lb10 + wall_A_p_rb1 + wall_A_p_rb4 + wall_A_p_rb10)/5
# avgavgwallthickness, WALLTHICKNESSPCT
wall_th_lb1, wall_th_p_lb1 = Thickness_wall(LB1)
wall_th_lb10, wall_th_p_lb10 = Thickness_wall(LB10)
wall_th_rb1, wall_th_p_rb1 = Thickness_wall(RB1)
wall_th_rb4, wall_th_p_rb4 = Thickness_wall(RB4)
wall_th_rb10, wall_th_p_rb10 = Thickness_wall(RB10)
df[f"WT"] = (wall_th_lb1 + wall_th_lb10 + wall_th_rb1 + wall_th_rb4 + wall_th_rb10)/5
df[f"WT_pct"] = (wall_th_p_lb1 + wall_th_p_lb10 + wall_th_p_rb1 + wall_th_p_rb4 + wall_th_p_rb10)/5
if demo_available:
# Normalized Wall thickness
df[f"WTn_Trachea_{Img0}"] = WT_norm(Trachea, row, KOR)
df[f"WTn_RMB_{Img0}"] = WT_norm(RMB, row, KOR)
df[f"WTn_LMB_{Img0}"] = WT_norm(LMB, row, KOR)
df[f"WTn_LLB_{Img0}"] = WT_norm(LLB, row, KOR)
df[f"WTn_BI_{Img0}"] = WT_norm(BronInt, row, KOR)
wTn_LB1 = WT_norm(LB1,row,KOR)
wTn_LB2 = WT_norm(LB2,row,KOR)
wTn_LB3 = WT_norm(LB3,row,KOR)
wTn_LB4 = WT_norm(LB4,row,KOR)
wTn_LB5 = WT_norm(LB5,row,KOR)
wTn_LB6 = WT_norm(LB6,row,KOR)
wTn_LB8 = WT_norm(LB8,row,KOR)
wTn_LB9 = WT_norm(LB9,row,KOR)
wTn_LB10 = WT_norm(LB10,row,KOR)
wTn_RB1 = WT_norm(RB1,row,KOR)
wTn_RB2 = WT_norm(RB2,row,KOR)
wTn_RB3 = WT_norm(RB3,row,KOR)
wTn_RB4 = WT_norm(RB4,row,KOR)
wTn_RB5 = WT_norm(RB5,row,KOR)
wTn_RB6 = WT_norm(RB6,row,KOR)
wTn_RB7 = WT_norm(RB7,row,KOR)
wTn_RB8 = WT_norm(RB8,row,KOR)
wTn_RB9 = WT_norm(RB9,row,KOR)
wTn_RB10 = WT_norm(RB10,row,KOR)
df[f"WTn_sLUL_{Img0}"] = np.mean([wTn_LB1,wTn_LB2,wTn_LB3,wTn_LB4,wTn_LB5])
df[f"WTn_sLLL_{Img0}"] = np.mean([wTn_LB6,wTn_LB8,wTn_LB9,wTn_LB10])
df[f"WTn_sRUL_{Img0}"] = np.mean([wTn_RB1,wTn_RB2,wTn_RB3])
df[f"WTn_sRML_{Img0}"] = np.mean([wTn_RB4,wTn_RB5])
df[f"WTn_sRLL_{Img0}"] = np.mean([wTn_RB6,wTn_RB7,wTn_RB8,wTn_RB9,wTn_RB10])
# Normalized hydraulic diameter: 4A/P
df[f"Dhn_Trachea_{Img0}"] = Dh_norm(Trachea, row, KOR)
df[f"Dhn_RMB_{Img0}"] = Dh_norm(RMB, row, KOR)
df[f"Dhn_LMB_{Img0}"] = Dh_norm(LMB, row, KOR)
df[f"Dhn_LLB_{Img0}"] = Dh_norm(LLB, row, KOR)
df[f"Dhn_BI_{Img0}"] = Dh_norm(BronInt, row, KOR)
dhn_LB1 = Dh_norm(LB1,row,KOR)
dhn_LB2 = Dh_norm(LB2,row,KOR)
dhn_LB3 = Dh_norm(LB3,row,KOR)
dhn_LB4 = Dh_norm(LB4,row,KOR)
dhn_LB5 = Dh_norm(LB5,row,KOR)
dhn_LB6 = Dh_norm(LB6,row,KOR)
dhn_LB8 = Dh_norm(LB8,row,KOR)
dhn_LB9 = Dh_norm(LB9,row,KOR)
dhn_LB10 = Dh_norm(LB10,row,KOR)
dhn_RB1 = Dh_norm(RB1,row,KOR)
dhn_RB2 = Dh_norm(RB2,row,KOR)
dhn_RB3 = Dh_norm(RB3,row,KOR)
dhn_RB4 = Dh_norm(RB4,row,KOR)
dhn_RB5 = Dh_norm(RB5,row,KOR)
dhn_RB6 = Dh_norm(RB6,row,KOR)
dhn_RB7 = Dh_norm(RB7,row,KOR)
dhn_RB8 = Dh_norm(RB8,row,KOR)
dhn_RB9 = Dh_norm(RB9,row,KOR)
dhn_RB10 = Dh_norm(RB10,row,KOR)
df[f"Dhn_sLUL_{Img0}"] = np.mean([dhn_LB1,dhn_LB2,dhn_LB3,dhn_LB4,dhn_LB5])
df[f"Dhn_sLLL_{Img0}"] = np.mean([dhn_LB6,dhn_LB8,dhn_LB9,dhn_LB10])
df[f"Dhn_sRUL_{Img0}"] = np.mean([dhn_RB1,dhn_RB2,dhn_RB3])
df[f"Dhn_sRML_{Img0}"] = np.mean([dhn_RB4,dhn_RB5])
df[f"Dhn_sRLL_{Img0}"] = np.mean([dhn_RB6,dhn_RB7,dhn_RB8,dhn_RB9,dhn_RB10])
else:
df[f"WTn_Trachea_{Img0}"] = "na"
df[f"WTn_RMB_{Img0}"] = "na"
df[f"WTn_LMB_{Img0}"] = "na"
df[f"WTn_LLB_{Img0}"] = "na"
df[f"WTn_BI_{Img0}"] = "na"
df[f"WTn_sLUL_{Img0}"] = "na"
df[f"WTn_sLLL_{Img0}"] = "na"
df[f"WTn_sRUL_{Img0}"] = "na"
df[f"WTn_sRML_{Img0}"] = "na"
df[f"WTn_sRLL_{Img0}"] = "na"
df[f"Dhn_Trachea_{Img0}"] = "na"
df[f"Dhn_RMB_{Img0}"] = "na"
df[f"Dhn_LMB_{Img0}"] = "na"
df[f"Dhn_LLB_{Img0}"] = "na"
df[f"Dhn_BI_{Img0}"] = "na"
df[f"Dhn_sLUL_{Img0}"] = "na"
df[f"Dhn_sLLL_{Img0}"] = "na"
df[f"Dhn_sRUL_{Img0}"] = "na"
df[f"Dhn_sRML_{Img0}"] = "na"
df[f"Dhn_sRLL_{Img0}"] = "na"
# Save per subject
df.to_csv(
os.path.join(subj_path, f"{Proj}_{Subj}_{Img0}_{Img1}_QCT.csv"),
index=False,
)
if i == 0:
final_df = df
else:
            final_df = pd.concat([final_df, df], ignore_index=True)
# Substitute for psvl
# Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
from scipy.interpolate import spline
import math
import seaborn as sns
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
start_time = time.time()
# Input file paths and info
###########################
m_res_isotope = 57
fit_type = 'Debye'
phoxpath = '../060_phox_FeNi_man/'
K_bcc_path = '../015_BulkModulusDet/Results/bccFeNi.csv'
K_hcp_path = '../015_BulkModulusDet/Results/hcpFeNi.csv'
start_fit_path = 'StartFitParams.csv'
choices_file_path = 'ChoiceFitParams.csv'
# Functions
###########
def transform_DOS(dos_df,m_res_isotope,rho,drho):
# f(E) = [(m/(2*pi^2*hbar^3*rho))*E^2/PDOS]^(1/3)
    # units: (kg/atom) / [(eV*s)^3 * (kg/m^3)] * (meV * eV/1000meV)^2 / (1/eV)
m_res_isotope_SI = m_res_isotope/(constants.N_A*1000) # g/mol to kg/atom
hbar = 6.58212E-16 # eV s
rho_SI = rho*1000 # g/cm^3 to kg/m^3
drho_SI = drho*1000 # g/cm^3 to kg/m^3
coeff = m_res_isotope_SI/(2*np.pi**2*hbar**3)
# PSVL has an extra 3 factor that I don't understand.
# Including this factor gives identical results to psvl.
coeff = 3*coeff
# energy in meV, but we need eV for calculation
# DOS in 1/eV
# If DOS = 0, divide by zero warning will appear later. We can't use this first
# data point anyway.
dos_df = dos_df[dos_df['DOS'] != 0.0].copy()
# Transform to velocity units
# Pandas can't handle the cubic root of a negative number or of zero.
# This corrects for that
cubic_root = lambda x: 0 if x==0 else np.sign(x) * np.power(abs(x), 1./3)
dos_df['Velocity'] = [cubic_root((coeff/rho_SI)*E**2/DOS)
for E, DOS in zip(dos_df['E']/1000,dos_df['DOS'])]
# Calculate uncertainty in velocity units
# df(E) = (1/3)*(E^2/DOS^4)^(1/3)*dDOS
dos_df['dVelocity'] = [np.sqrt( ((1/3)*cubic_root((coeff/rho_SI)*E**2/(DOS**4))*dDOS)**2 +
((1/3)*cubic_root((coeff/rho_SI**4)*E**2/(DOS))*drho_SI)**2)
for E, DOS, dDOS in zip(dos_df['E']/1000,dos_df['DOS'],dos_df['dDOS'])]
return(dos_df) # Units of m/s
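# Illustrative usage sketch of transform_DOS (not part of the original analysis;
# the numbers below are made up and only show the expected input/output columns).
def _example_transform_DOS():
    toy_dos = pd.DataFrame({'E': [2.0, 4.0, 8.0],             # energy in meV
                            'DOS': [0.5e-3, 2.0e-3, 8.0e-3],  # PDOS in 1/eV
                            'dDOS': [0.1e-3, 0.2e-3, 0.4e-3]})
    out = transform_DOS(toy_dos, m_res_isotope=57, rho=10.0, drho=0.1)
    # 'Velocity' and 'dVelocity' columns (in m/s) are appended to the frame
    return out[['E', 'Velocity', 'dVelocity']]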
# Create a plot of transformed PDOS
def plot_transform_DOS(folder,index,dos_df,fit):
fig, (ax0) = plt.subplots(nrows = 1, ncols=1, sharex=True, figsize=(8, 6))
ax0.plot(dos_df['E'], dos_df['Velocity'],
marker = '', linestyle='', color='black', mfc='White')
ax0.errorbar(dos_df['E'], dos_df['Velocity'], yerr=dos_df['dVelocity'],
marker = '', linestyle='', color='black', mfc='White', elinewidth = 1)
if fit != None:
[E_fit,vD_fit] = fit
ax0.plot(E_fit, vD_fit, '-', color='red')
ax0.set_xlabel(r'Energy (meV)', fontsize=14)
ax0.set_ylabel(r'$(E^2/D(E,V))^{1/3}$ (m/s)', fontsize=14)
ax0.set_xlim([0,40])
ax0.set_ylim([2000,8000])
plt.tight_layout()
fig = plt.gcf()
fig.savefig(folder+'/PDOS_Velocity_'+index+'.pdf', format='pdf')
plt.close()
def set_E_range(dos_df, Emin, Emax):
dos_crop_df = dos_df[dos_df['E'] >= Emin]
dos_crop_df = dos_crop_df[dos_crop_df['E'] <= Emax]
E_data = dos_crop_df['E']
vD_data = dos_crop_df['Velocity']
dvD_data = dos_crop_df['dVelocity']
return E_data, vD_data, dvD_data
def set_start_params(fit_type,dos_df):
dos_beg_df = dos_df[dos_df['E'] <= 5]
A0 = dos_beg_df['Velocity'].mean()
if fit_type == 'Debye':
return A0
elif fit_type == 'Constrained_Power':
A1 = 10
return A0, A1
elif fit_type == 'Unconstrained_Power':
A1 = 10
A2 = 2
return A0, A1, A2
else:
print('Error: Invalid model selected.\n')
def Debye_fit(x,A0):
return A0
def Constrained_Power_fit(x,A0,A1):
return A0 - (x/A1)**2
def Unconstrained_Power_fit(x,A0,A1,A2):
return A0 - (x/A1)**A2
# Fit a curve to data
def fit_curve(E_data,vD_data,dvD_data,fit_type):
no_conv = False
# Define start parameters for fit
start_params = set_start_params(fit_type,dos_df)
# Fit curve to data depending on selected model
if fit_type == 'Debye':
try:
par_opt,par_cov = curve_fit(Debye_fit,E_data,vD_data,
p0=[start_params], sigma = dvD_data)
vD_fit = [Debye_fit(E,*par_opt) for E in E_data.values]
except:
no_conv = True
elif fit_type == 'Constrained_Power':
try:
par_opt,par_cov = curve_fit(Constrained_Power_fit,E_data,vD_data,
p0=[*start_params], sigma = dvD_data)
vD_fit = [Constrained_Power_fit(E,*par_opt) for E in E_data.values]
except:
no_conv = True
elif fit_type == 'Unconstrained_Power':
try:
par_opt,par_cov = curve_fit(Unconstrained_Power_fit,E_data,vD_data,
p0=[*start_params], sigma = dvD_data)
vD_fit = [Unconstrained_Power_fit(E,*par_opt) for E in E_data.values]
except:
no_conv = True
else:
print('Error: Model not found.')
if not no_conv:
# Get number of data points
N = len(E_data)
# Calculate reduced chi^2
chi2 = calc_chi2(vD_data,vD_fit,dvD_data,N,M)
# Calculate AICc
AICc = calc_AICc(chi2, M, N)
# Calclulate "probability" from AICc
prob = calc_pdfAICc(AICc)
return par_opt, par_cov, vD_fit, N, chi2, AICc, prob, no_conv
else:
return None, None, None, None, None, None, None, no_conv
# Calculate reduced chi^2
def calc_chi2(data,fit,sigma,N,M):
chi2 = np.sum((data-fit)**2/sigma**2)/(N-M)
return chi2
# Calculate AICc - corrected Akaike information criterion
# Useful for comparing fits with different numbers of model parameters or data points
# Correction accounts for small number of data points
# Most other information criteria assume N is large, which wouldn't work here
# M = number of model parameters
# N = number of data points
def calc_AICc(misfit, M, N):
AIC = misfit + 2*M
correction = 2*M*(M+1)/(N-M-1) # Corrects for cases with small N
AICc = AIC + correction
return AICc
# AICc is analogous to a log posterior. Transform back to posterior space to create
# a pdf
def calc_pdfAICc(AICc):
return np.exp(-(1/2)*AICc)
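# Worked example with made-up numbers (not from the data): why AICc can prefer
# the simpler model even when its misfit is slightly larger.
def _example_AICc_comparison():
    debye_weight = calc_pdfAICc(calc_AICc(1.2, M=1, N=20))  # AICc ~ 3.42 -> ~0.18
    power_weight = calc_pdfAICc(calc_AICc(1.0, M=3, N=20))  # AICc = 8.5  -> ~0.014
    # The 1-parameter Debye fit carries roughly 13x more weight than the
    # 3-parameter power law, so it dominates the unnormalized posterior.
    return debye_weight, power_weight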
def vD_from_fit(par_opt, par_cov):
vD = par_opt[0]
dvD = np.abs(np.sqrt(par_cov[0,0]))
return vD, dvD
# Gaussian function for fitting
def Gaussian(x,amplitude,mean,sigma):
return amplitude*np.exp(-(x-mean)**2/(2*sigma**2))
# Bin data by vD to get pdf of vD
def bin_vD(psvl_all_df,bin_width):
min_vD = psvl_all_df['vD'].min()
max_vD = psvl_all_df['vD'].max()
vD_start = min_vD - (min_vD%bin_width) - bin_width
vD_end = max_vD - (max_vD%bin_width) + 2*bin_width
vD_bins = np.arange(vD_start,vD_end,bin_width)
psvl_all_df['Bin'] = pd.cut(psvl_all_df['vD'],vD_bins)
psvl_all_df['Bin Midpoint'] = [psvl_all_df['Bin'].iloc[i].mid for i in psvl_all_df.index]
grouped = psvl_all_df.groupby(['Bin Midpoint'])
binned_df = grouped.sum()[['Prob']].reset_index()
return binned_df
# Pivot data for plotting
def df_2_pivotdata(psvl_all_df,param):
psvl_all_df = psvl_all_df.drop_duplicates(subset=['Emin', 'Emax'], keep='first')
# NaN automatically assigned when missing
    pivotplot = psvl_all_df.pivot(index='Emin', columns='Emax', values=param)
Emin_array = pivotplot.index.values
Emax_array = pivotplot.columns.values
param_pivot = pivotplot.values
return Emin_array, Emax_array, param_pivot
def plot_AICc_analysis(psvl_all_df,binned_df,Gauss_opt,folder,index,fit_type,
E_choice,title):
# Pivot data so we can make a plot
Emin_array, Emax_array, chi2_pivot = df_2_pivotdata(psvl_all_df,'chi2')
Emin_array, Emax_array, prob_pivot = df_2_pivotdata(psvl_all_df,'Prob')
Emin_array, Emax_array, vD_pivot = df_2_pivotdata(psvl_all_df,'vD')
Emin_array, Emax_array, dvD_pivot = df_2_pivotdata(psvl_all_df,'dvD')
fig, (ax0, ax1, ax2, ax3) = plt.subplots(nrows = 4, ncols=1, figsize=(16,16))
cmap_choice = 'magma'
big_dot_size = 10
if E_choice != None:
Emin_choice = E_choice[0]
Emax_choice = E_choice[1]
pdflim_min = max([min(binned_df['Bin Midpoint']),2000])
pdflim_max = min([max(binned_df['Bin Midpoint']),8000])
# Plot chi2 data
cf0 = ax0.pcolormesh(Emax_array,Emin_array,chi2_pivot,edgecolor='face',
cmap=cmap_choice+'_r')
cbar0 = plt.colorbar(cf0, ax = ax0, aspect=10)
cbar0.ax.set_ylabel(r'$\chi^2$',fontsize=18)
if E_choice != None:
ax0.plot(Emax_choice,Emin_choice,'o',color='white',ms=big_dot_size)
ax0.set_xlim([min(Emax_array),max(Emax_array)])
ax0.set_ylim([min(Emin_array),max(Emin_array)])
ax0.tick_params(direction='out')
ax0.set_ylabel('Emin',fontsize=18)
ax0.set_title(title,fontsize=18)
cf1 = ax1.pcolormesh(Emax_array,Emin_array,prob_pivot,edgecolor='face',cmap=cmap_choice)
cbar1 = plt.colorbar(cf1, ax = ax1, aspect=10)
cbar1.ax.set_ylabel(r'Posterior (unnormalized)',fontsize=18)
if E_choice != None:
ax1.plot(Emax_choice,Emin_choice,'o',color='white',ms=big_dot_size)
ax1.set_xlim([min(Emax_array),max(Emax_array)])
ax1.set_ylim([min(Emin_array),max(Emin_array)])
ax1.tick_params(direction='out')
ax1.set_ylabel('Emin',fontsize=18)
# Plot vD data
cf2 = ax2.pcolormesh(Emax_array,Emin_array,vD_pivot,edgecolor='face',cmap=cmap_choice,
vmin=pdflim_min, vmax=pdflim_max)
cbar2 = plt.colorbar(cf2, ax = ax2, aspect=10)
cbar2.ax.set_ylabel(r'$vD$',fontsize=20)
if E_choice != None:
ax2.plot(Emax_choice,Emin_choice,'o',color='white',ms=big_dot_size)
ax2.set_xlim([min(Emax_array),max(Emax_array)])
ax2.set_ylim([min(Emin_array),max(Emin_array)])
ax2.tick_params(direction='out')
ax2.set_xlabel('Emax',fontsize=18)
ax2.set_ylabel('Emin',fontsize=18)
# Plot vD pdf data
ax3.plot(binned_df['Bin Midpoint'],binned_df['Prob'],'.')
ax3.plot(binned_df['Bin Midpoint'],Gaussian(binned_df['Bin Midpoint'],*Gauss_opt),
color='red')
ax3.set_xlim([min(binned_df['Bin Midpoint']),max(binned_df['Bin Midpoint'])])
ax3.tick_params(direction='out')
ax3.set_xlabel(r'$vD$',fontsize=18)
ax3.set_ylabel(r'pdf of $vD$',fontsize=18)
# Fine-tune figure; make subplots close to each other and hide x ticks for
# all but bottom plot.
fig.subplots_adjust(hspace=0.05)
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax1.get_xticklabels(), visible=False)
plt.tight_layout()
fig.savefig(folder+'/AICc_plot_'+index+'_'+fit_type+'.pdf', format='pdf')
plt.close()
def vPvS_vDeqn(vP,vS,vD):
return 1/vP**3 + 2/vS**3 - 3/vD**3
def vPvS_vphieqn(vP,vS,vphi):
return vP**2 - (4/3)*vS**2 - vphi**2
def vP_vS_eqns(initialguess,vD,vphi):
vP, vS = initialguess
eqn1 = vPvS_vDeqn(vP,vS,vD)
eqn2 = vPvS_vphieqn(vP,vS,vphi)
return eqn1, eqn2
def get_vP_vS(vD,dvD,vphi,dvphi):
num_samples = 10000
# Create random normal distributions for vD and vphi
vD_dist = np.random.normal(vD, dvD, num_samples)
vphi_dist = np.random.normal(vphi, dvphi, num_samples)
initialguess = (vD+3000, vD-500)
# Solve system of equations defined in vP_vS_eqns()
temp = [fsolve(vP_vS_eqns, initialguess, args = (vD_i,vphi_i))
for vD_i,vphi_i in zip(vD_dist,vphi_dist)]
vP_dist = np.array(temp)[:,0]
vS_dist = np.array(temp)[:,1]
vP = vP_dist.mean()
dvP = vP_dist.std()
vS = vS_dist.mean()
dvS = vS_dist.std()
return vP, dvP, vS, dvS
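# Illustrative call with made-up velocities in m/s (not measured values):
# propagate Gaussian uncertainty in the Debye velocity vD and the bulk sound
# velocity vphi into vP and vS via the Monte Carlo solver above.
def _example_get_vP_vS():
    vP, dvP, vS, dvS = get_vP_vS(vD=4500, dvD=100, vphi=7000, dvphi=150)
    return vP, dvP, vS, dvS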
# Load datasets
###############
K_bcc_df = pd.read_csv(K_bcc_path, engine='python')
#!/usr/bin/env python
"""Tests for `arcos_py` package."""
from numpy import int64
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from arcos4py import ARCOS
from arcos4py.tools._errors import noDataError
@pytest.fixture
def no_bin_data():
"""
pytest fixture to generate test data
"""
data = [item for i in range(10) for item in list(range(1, 11))]
m = [0 for i in range(100)]
d = {'id': data, 'time': data, 'm': m, 'x': data}
print(d)
df = pd.DataFrame(d)
return df
def test_empty_data(no_bin_data: pd.DataFrame):
with pytest.raises(noDataError, match='Input is empty'):
test_data = no_bin_data[no_bin_data['m'] > 0]
pos = ['x']
ts = ARCOS(
test_data, posCols=pos, frame_column='time', id_column='id', measurement_column='m', clid_column='clTrackID'
)
ts.trackCollev(eps=1, minClsz=1, nPrev=2)
def test_1_central_1_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_2_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_3D():
df_in = pd.read_csv('tests/testdata/1central3D_in.csv')
df_true = pd.read_csv('tests/testdata/1central3D_res.csv')
pos = ['x', 'y', 'z']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x', 'y', 'z'])
assert_frame_equal(out, df_true)
def test_1_central_growing():
df_in = pd.read_csv('tests/testdata/1centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/1centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_central_growing():
df_in = pd.read_csv('tests/testdata/2centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/2centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_symmetric():
df_in = pd.read_csv('tests/testdata/2with1commonSym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonSym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_asymmetric():
df_in = pd.read_csv('tests/testdata/2with1commonAsym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonAsym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_1_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_2_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_1_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_2_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_6_overlapping():
df_in = pd.read_csv('tests/testdata/6overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/6overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
out['trackID'] = out['trackID'].astype(int64)
assert_frame_equal(out, df_true)
def test_split_from_single():
df_in = pd.read_csv('tests/testdata/1objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/1objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_split_from_2_objects():
df_in = pd.read_csv('tests/testdata/2objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/2objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_cross_2_objects():
df_in = pd.read_csv('tests/testdata/2objCross_in.csv')
df_true = pd.read_csv('tests/testdata/2objCross_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_merge_split_2_objects_with_common():
    df_in = pd.read_csv('tests/testdata/2objMergeSplitCommon_in.csv')
# libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os, sys
import matplotlib.dates as mdates
import matplotlib as mpl
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axisartist.axislines import Axes
from mpl_toolkits import axisartist
import uncertainpy as un
import statistics as st
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
from src.utils.settings import config
# from src.utils.uq_output import draw_plot
from src.models.icestupaClass import Icestupa
from src.models.methods.metadata import get_parameter_metadata
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == "__main__":
locations = ["gangles21", "guttannen21", "guttannen20"]
index = pd.date_range(start="1-1-2022", end="1-1-2024", freq="D", name="When")
df_out = pd.DataFrame(columns=locations, index=index)
names = [
"DX",
"IE",
"A_I",
"A_S",
"A_DECAY",
"T_PPT",
"Z",
"T_F",
# "D_MEAN",
# "MU_CONE",
# "r_spray",
]
names_label = [
"$\\Delta x$",
"$\\epsilon_{ice}$",
r"$\alpha_{ice}$",
r"$\alpha_{snow}$",
"$\\tau$",
"$T_{ppt}$",
"$z_{0}$",
"$T_{F}$",
# "$d_{mean}$",
# r"$\mu_{cone}$",
# "$r_{spray}$",
]
zip_iterator = zip(names, names_label)
param_dictionary = dict(zip_iterator)
evaluations = []
percent_change = []
efficiency_change = []
site = []
param = []
result = []
freeze_rate = []
melt_rate = []
fig, ax = plt.subplots()
for location in locations:
SITE, FOLDER = config(location)
icestupa = Icestupa(location)
icestupa.read_output()
feature_name = "efficiency"
icestupa.se = (icestupa.M_water + icestupa.M_ice) / icestupa.M_input * 100
for j in range(0, icestupa.df.shape[0]):
if icestupa.df.loc[j, "fountain_froze"] != 0:
freeze_rate.append(
[
get_parameter_metadata(location)["shortname"],
j,
icestupa.df.loc[j, "fountain_froze"] / 60,
]
)
if icestupa.df.loc[j, "melted"] != 0:
melt_rate.append(
[
get_parameter_metadata(location)["shortname"],
j,
icestupa.df.loc[j, "melted"] / 60,
]
)
for name in names:
data = un.Data()
filename1 = FOLDER["sim"] + name + ".h5"
data.load(filename1)
print(data)
evaluations.append(data[feature_name].evaluations)
eval = data[feature_name].evaluations
print(
f"95 percent confidence interval caused by {name} is {round(st.mean(eval),2)} and {round(2 * st.stdev(eval),2)}"
)
for i in range(0, len(data[feature_name].evaluations)):
result.append(
[
get_parameter_metadata(location)["shortname"],
param_dictionary[name],
data[feature_name].evaluations[i],
(data[feature_name].evaluations[i] - icestupa.se),
]
)
df = pd.DataFrame(result, columns=["Site", "param", "SE", "percent_change"])
df2 = pd.DataFrame(freeze_rate, columns=["Site", "hour", "frozen"])
    df3 = pd.DataFrame(melt_rate, columns=["Site", "hour", "melted"])
from io import StringIO
from typing import Dict
import unittest
import pandas as pd
from data_manager.base_manager import DataParam
from data_manager.time_series_manager import TimeSeriesDataManager
from proto.aiengine.v1 import aiengine_pb2
def get_test_fields(fill_method) -> Dict[str, aiengine_pb2.FieldData]:
return {
"foo": aiengine_pb2.FieldData(
initializer=10.0, fill_method=fill_method
),
"bar": aiengine_pb2.FieldData(
initializer=0.0, fill_method=fill_method
)
}
def get_test_data_manager(fill_method=aiengine_pb2.FILL_ZERO, fields=None):
if fields is None:
fields = get_test_fields(fill_method)
return TimeSeriesDataManager(
param=DataParam(
            epoch_time=pd.to_datetime(10, unit="s"),
"""
A module used to work
with animations
"""
import abc
from enum import Enum
import json
from typing import Optional, List, Union
import pandas as pd
from pandas.api.types import is_numeric_dtype
from ipyvizzu.json import RawJavaScript, RawJavaScriptEncoder
from ipyvizzu.schema import DataSchema
class Animation:
"""
An abstract class used to represent
an animation object which has dump and build methods
"""
def dump(self) -> str:
"""
A method used to convert
the builded data into json str
"""
return json.dumps(self.build(), cls=RawJavaScriptEncoder)
@abc.abstractmethod
def build(self) -> dict:
"""
A method used to return
a dict with native python values that can be converted into json
"""
class PlainAnimation(dict, Animation):
"""
A class used to represent
a plain animation which is a custom dictionary
"""
def build(self) -> dict:
return self
class InferType(Enum):
"""
An enum class used to define
infer type options
"""
DIMENSION = "dimension"
MEASURE = "measure"
class Data(dict, Animation):
"""
A class used to represent
data animation
"""
@classmethod
def filter(cls, filter_expr: str):
"""
A method used to return
a Data() class which contains a filter
"""
data = cls()
data.set_filter(filter_expr)
return data
def set_filter(self, filter_expr: str) -> None:
"""
A method used to add
filter to an existing Data() class
"""
filter_expr = (
RawJavaScript(f"record => {{ return ({filter_expr}) }}")
if filter_expr is not None
else filter_expr
)
self.update({"filter": filter_expr})
@classmethod
def from_json(cls, filename: str):
"""
A method used to return
a Data() class which created from a json file
"""
with open(filename, "r", encoding="utf8") as file_desc:
return cls(json.load(file_desc))
def add_record(self, record: list) -> None:
"""
A method used to add
record to an existing Data() class
"""
self._add_value("records", record)
def add_records(self, records: List[list]) -> None:
"""
A method used to add
records to an existing Data() class
"""
list(map(self.add_record, records))
def add_series(self, name: str, values: Optional[list] = None, **kwargs) -> None:
"""
A method used to add
series to an existing Data() class
"""
self._add_named_value("series", name, values, **kwargs)
    def add_dimension(self, name: str, values: Optional[list] = None, **kwargs) -> None:
"""
A method used to add
dimension to an existing Data() class
"""
self._add_named_value("dimensions", name, values, **kwargs)
def add_measure(self, name: str, values: Optional[list] = None, **kwargs) -> None:
"""
A method used to add
measure to an existing Data() class
"""
self._add_named_value("measures", name, values, **kwargs)
def add_data_frame(
self,
data_frame: Union[pd.DataFrame, pd.core.series.Series],
default_measure_value=0,
default_dimension_value="",
) -> None:
"""
A method used to add
dataframe to an existing Data() class
"""
if not isinstance(data_frame, type(None)):
if isinstance(data_frame, pd.core.series.Series):
data_frame = pd.DataFrame(data_frame)
if not isinstance(data_frame, pd.DataFrame):
raise TypeError(
"data_frame must be instance of pandas.DataFrame or pandas.Series"
)
for name in data_frame.columns:
values = []
if is_numeric_dtype(data_frame[name].dtype):
infer_type = InferType.MEASURE
values = (
data_frame[name]
.fillna(default_measure_value)
.astype(float)
.values.tolist()
)
else:
infer_type = InferType.DIMENSION
values = (
data_frame[name]
.fillna(default_dimension_value)
.astype(str)
.values.tolist()
)
self.add_series(
name,
values,
type=infer_type.value,
)
def add_data_frame_index(
self,
data_frame: Union[pd.DataFrame, pd.core.series.Series],
name: str,
) -> None:
"""
A method used to add
dataframe index to an existing Data() class
"""
if data_frame is not None:
if isinstance(data_frame, pd.core.series.Series):
                data_frame = pd.DataFrame(data_frame)
import numpy as np
import pandas as pd
import xarray as xr
def convert_datetime64(obj, tz_from, tz_to, **kwargs):
"""Convert a numpy datetime object to a different timezone.
Numpy datetime objects do not have native support for timezones anymore.
Therefore pandas is used to convert between different timezones.
Parameters
----------
obj : :obj:`numpy.datetime64`
Object to be converted.
tz_from:
Timezone of the object to be converted. Can be given as :obj:`str`
referring to the name of a timezone in the tz database, or as instance
of any class inheriting from :class:`datetime.tzinfo`.
tz_to:
Timezone the object should be converted to. Can be given as :obj:`str`
referring to the name of a timezone in the tz database, or as instance
of any class inheriting from :class:`datetime.tzinfo`.
**kwargs:
Additional keyword arguments passed on to
:meth:`pandas.Timestamp.tz_convert`.
Returns
-------
:obj:`numpy.datetime64`
"""
obj_new = pd.Timestamp(obj).tz_localize(tz_from).tz_convert(tz_to, **kwargs)
return np.datetime64(obj_new.tz_localize(None))
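# Illustrative check (assumed timezone names, not part of the module's API): a
# naive UTC timestamp converted to Central European Summer Time gains two hours.
def _example_convert_datetime64():
    utc_noon = np.datetime64("2021-06-01T12:00:00")
    return convert_datetime64(utc_noon, "UTC", "Europe/Zurich")  # 2021-06-01T14:00:00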
def create_extent_cube(spatial_extent, temporal_extent, spatial_resolution,
temporal_resolution = None, crs = None, tz = None,
trim = True):
"""Create a spatio-temporal extent cube.
Internally the query processor uses a multi-dimensional array to represent
the spatio-temporal extent of the query. This is an :obj:`xarray.DataArray`
and forms the base template for all cubes that are fetched from the factbase
during query processing.
Parameters
-----------
spatial_extent : SpatialExtent
Spatial extent.
temporal_extent : TemporalExtent
Temporal extent.
spatial_resolution : :obj:`list`
Spatial resolution of the cube. Should be given as a list in the format
`[y, x]`, where y is the cell size along the y-axis, x is the cell size
along the x-axis, and both are given as :obj:`int` or :obj:`float`
value expressed in the units of the CRS. These values should include
the direction of the axes. For most CRSs, the y-axis has a negative
direction, and hence the cell size along the y-axis is given as a
negative number.
temporal_resolution : :obj:`str` or :obj:`pandas.tseries.offsets.DateOffset`
Temporal resolution of the cube. Can be given as offset alias as
defined in pandas, e.g. "D" for a daily frequency. These aliases can
have multiples, e.g. "5D". If :obj:`None`, only the start and end
instants of the extent will be temporal coordinates in the cube.
crs : optional
Coordinate reference system in which the spatial coordinates of the cube
should be expressed. Can be given as any object understood by the
initializer of :class:`pyproj.crs.CRS`. This includes
:obj:`pyproj.crs.CRS` objects themselves, as well as EPSG codes and WKT
strings. If :obj:`None`, the CRS of the provided spatial extent is used.
tz : optional
Timezone in which the temporal coordinates of the cube should be
expressed. Can be given as :obj:`str` referring to the name of a time
zone in the tz database, or as instance of any class inheriting from
:class:`datetime.tzinfo`. If :obj:`None`, the timezone of the provided
temporal extent is used.
trim : :obj:`bool`
Should the cube be trimmed before returning? Trimming means that all
coordinates for which all values are null, are dropped from the array.
The spatial dimension (if present) is treated differently, by trimming
it only at the edges, and thus maintaining the regularity of the spatial
dimension.
Returns
-------
:obj:`xarray.DataArray`
A two-dimensional data cube with a spatial and temporal dimension. The
spatial dimension is a stacked dimension with each coordinate value
being a tuple of the x and y coordinate of the corresponding cell.
"""
# Rasterize spatial extent.
space = spatial_extent.rasterize(spatial_resolution, crs, stack = True)
# Add spatial feature indices as coordinates.
space.coords["feature"] = ("space", space.data)
space["feature"].name = "feature"
space["feature"].sq.value_type = space.sq.value_type
space["feature"].sq.value_labels = space.sq.value_labels
# Discretize temporal extent.
time = temporal_extent.discretize(temporal_resolution, tz)
# Combine rasterized spatial extent with discretized temporal extent.
extent = space.expand_dims({"time": time})
extent["time"].sq.value_type = "datetime"
# Add temporal reference.
extent = extent.sq.write_tz(time.sq.tz)
# Trim the extent cube if requested.
# This means we drop all x, y and time slices for which all values are nan.
if trim:
extent = extent.sq.trim()
return extent
def np_null(x):
"""Return the appropriate null value for a numpy array.
For arrays of datetime values ``NaT`` is returned. For other arrays ``nan``
is returned.
Parameters
----------
x : :obj:`numpy.array`
The input array.
"""
return np.datetime64("NaT") if x.dtype.kind == "M" else np.nan
def np_allnull(x, axis):
"""Test whether all elements along a given axis in a numpy array are null.
Parameters
----------
x : :obj:`numpy.array`
The input array.
axis : :obj:`int`
Axis along which the tests are performed.
Return
-------
:obj:`numpy.array`
"""
    return np.equal(np.sum(pd.notnull(x), axis=axis), 0)
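# Small sanity check for np_allnull (illustrative values only).
def _example_np_allnull():
    x = np.array([[np.nan, 1.0], [np.nan, np.nan]])
    return np_allnull(x, axis=1)  # -> array([False,  True])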
SECONDS_IN_ONE_DAY = 60*60*24 # 86400 # used for granularity (daily)
import logging
logger = logging.getLogger('isitfit')
# Exception classes
class NoCloudtrailException(Exception):
pass
class DdgNoData(ValueError):
pass
class HostNotFoundInDdg(DdgNoData):
pass
class DataNotFoundForHostInDdg(DdgNoData):
pass
class NoCloudwatchException(Exception):
pass
class IsitfitCliRunnerBreakIterator(Exception):
pass
def mergeSeriesOnTimestampRange(df_cpu, df_type, fields):
"""
Upsamples df_type to df_cpu.
Check unit test for an example
"""
# check that df_type dataframe are sorted descending
# This does not apply to df_cpu
#if df_cpu.Timestamp.iloc[0] < df_cpu.Timestamp.iloc[-1]:
# raise Exception("CPU Dataframe should be sorted descending for utils.mergeSeriesOnTimestampRange")
if df_type.iloc[0].name < df_type.iloc[-1].name:
raise Exception("Types Dataframe should be sorted descending for utils.mergeSeriesOnTimestampRange")
import numpy as np
for f in fields:
df_cpu[f] = None
# assume df_type is sorted in decreasing EventTime order (very important)
# NB: since some instances are not present in the cloudtrail (for which we append artificially the "now" type)
# Need to traverse the df_type matrix backwards
for index, row_type in df_type.iterrows():
row_i = np.where(df_cpu.Timestamp <= row_type.name)[0]
for f in fields:
# use row_type.name instead of row_type['EventTime']
# check note above about needing to traverse backwards
# df_cpu.iloc[np.where(df_cpu.Timestamp >= row_type.name)[0], df_cpu.columns.get_loc('instanceType')] = row_type['instanceType']
col_i = df_cpu.columns.get_loc(f)
df_cpu.iloc[row_i, col_i] = row_type[f]
# fill na at beginning with back-fill
# (artifact of cloudwatch having data at days before the creation of the instance)
for f in fields:
df_cpu[f] = df_cpu[f].fillna(method='backfill')
return df_cpu
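# Minimal usage sketch with hypothetical frames (the unit test remains the
# authoritative example); df_type must be indexed by EventTime, descending.
def _example_mergeSeriesOnTimestampRange():
  import pandas as pd
  df_cpu = pd.DataFrame({'Timestamp': pd.to_datetime(['2019-01-01', '2019-01-05', '2019-01-09'])})
  df_type = pd.DataFrame(
    {'instanceType': ['m4.xlarge', 't2.micro']},
    index=pd.to_datetime(['2019-01-10', '2019-01-03'])  # descending EventTime
  )
  # upsamples the sparse cloudtrail series onto every cloudwatch timestamp
  return mergeSeriesOnTimestampRange(df_cpu, df_type, ['instanceType'])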
# copied from git-remote-aws
def mysetlocale():
li = 'en_US.utf8'
import os
os.environ["LC_ALL"] = li
os.environ["LANG"] = li
MAX_ROWS = 10
MAX_COLS = 5
MAX_STRING = 20
def display_df(title, df, csv_fn, shape, logger):
# https://pypi.org/project/termcolor/
from termcolor import colored
import click
click.echo("")
if shape[0]==0:
click.echo(title)
click.echo(colored("None", "red"))
return
if csv_fn is not None:
click.echo(colored("The table '%s' was saved to the CSV file '%s'."%(title, csv_fn), "cyan"))
click.echo(colored("It could be opened in the terminal with visidata (http://visidata.org/)","cyan"))
click.echo(colored("and you can close visidata by pressing 'q'","cyan"))
open_vd = input(colored('Would you like to do so? yes/[no] ', 'cyan'))
if open_vd.lower() == 'yes' or open_vd.lower() == 'y':
click.echo("Opening CSV file `%s` with visidata."%csv_fn)
from subprocess import call
call(["vd", csv_fn])
click.echo("Exited visidata.")
click.echo(colored("The table '%s' was saved to the CSV file '%s'."%(title, csv_fn), "cyan"))
return
else:
click.echo("Not opening visidata.")
click.echo("To open the results with visidata, use `vd %s`."%csv_fn)
# if not requested to open with visidata
from tabulate import tabulate
df_show = df.head(n=MAX_ROWS)
df_show = df_show.applymap(lambda c: (c[:MAX_STRING]+'...' if len(c)>=MAX_STRING else c) if type(c)==str else c)
click.echo(tabulate(df_show, headers='keys', tablefmt='psql', showindex=False))
if (shape[0] > MAX_ROWS) or (shape[1] > MAX_COLS):
click.echo("...")
click.echo("(results truncated)")
# done
return
# done
return
def prompt_upgrade(pkg_name, current_version):
"""
check if current version is out-of-date
https://github.com/alexmojaki/outdated
copied from https://github.com/WhatsApp/WADebug/blob/958ac37be804cc732ae514d4872b93d19d197a5c/wadebug/cli.py#L40
"""
import outdated
is_outdated = False
try:
is_outdated, latest_version = outdated.check_outdated(pkg_name, current_version)
except requests.exceptions.ConnectionError as error:
# e.g.
# requests.exceptions.ConnectionError:
# HTTPSConnectionPool(host='pypi.python.org', port=443):
# Max retries exceeded with url: /pypi/isitfit/json
# (Caused by NewConnectionError(... Name or service not known)
# Thrown upon internet disconnection
pass
except ValueError as error:
# catch case of "ValueError: Version 0.10.0 is greater than the latest version on PyPI: 0.9.1"
# This would happen on my dev machine
if not "is greater than" in str(error):
raise
# In this case, outdated does not cache the result to disk
# so cache it myself (copied from https://github.com/alexmojaki/outdated/blob/565bb3fe1adc30da5e50249912cd2ac494662659/outdated/__init__.py#L61)
latest_version = str(error).split(":")[1].strip()
import datetime as dt
import json
with outdated.utils.cache_file(pkg_name, 'w') as f:
try:
data = [latest_version, outdated.utils.format_date(dt.datetime.now())]
json.dump(data, f)
except Exception as e:
print('Error: ' + str(e))
raise
# is_outdated = True # FIXME for debugging
if not is_outdated:
return is_outdated
import click
msg_outdated = """The current version of {pkg_name} ({current_version}) is out of date.
Run `pip3 install {pkg_name} --upgrade` to upgrade to version {latest_version},
or use `isitfit --skip-check-upgrade ...` to skip checking for version upgrades of isitfit.
"""
msg_outdated = msg_outdated.format(
pkg_name=pkg_name, current_version=current_version, latest_version=latest_version
)
click.secho(msg_outdated, fg="red")
# Give the user some time to read the message and possibly update
import time
from tqdm import tqdm
wait_outdated = 10
click.secho("Will continue in %i seconds"%wait_outdated, fg='yellow')
for i in tqdm(range(wait_outdated)):
time.sleep(1)
return is_outdated
# This import needs to stay here for the sake of the mock in test_utils
import requests
SKIP_PING=False
def ping_matomo(action_name):
"""
Gather anonymous usage statistics
"""
logger.debug("ping_matomo('%s')"%action_name)
# get uuid
from .dotMan import DotMan
uuid_val = DotMan().get_myuid()
# get version
from . import isitfit_version as isitfit_cli_version
# build action name field. note that "action_name" already starts with "/"
full_actionName = "%s%s"%(isitfit_cli_version, action_name)
# use base function
from matomo_sdk_py.matomo_sdk_py import ping_matomo as ping_matomo_base
ping_matomo_base(
action_name=full_actionName,
action_base="https://cli.isitfit.io",
idsite=2, # 2 is for cli.isitfit.io
uuid_val=uuid_val,
matomo_url="https://isitfit.matomo.cloud/piwik.php"
)
def display_footer():
import click
from . import isitfit_version
click.echo("")
click.echo("⛅ Generated by isitfit version %s"%isitfit_version)
click.echo("")
click.echo("Useful links:")
click.echo("ℹ️ Docs https://isitfit.autofitcloud.com")
click.echo("🌎 Climate https://twitter.com/hashtag/ClimateStrike")
#click.echo("❤️ Built by AutofitCloud https://www.autofitcloud.com")
click.echo("💬 Chat https://discord.gg/Z2YMDvx")
click.echo("🤖 Misc https://www.reddit.com/r/autofitcloud")
def ask_feedback():
# TODO should use a proper feedback gathering method rather than collecting it in matomo
# but this is a shortcut for now
print("")
import click
a1 = click.prompt("How useful was this? (0: wtf, 1: useless, 2: IDK, 3: kind of, 4: epiphanic)", type=click.IntRange(0, 4))
ping_matomo("/feedback?a1_usefulness=%i"%a1)
q2 = {
0: "Seriously? Why?",
1: "Is there no hope? What can be done?",
2: "What would make things clearer?",
3: "What can we improve?",
4: "TBH, I wasn't expecting this. Really? Why?"
}
a2 = click.prompt(q2[a1])
ping_matomo("/feedback?a2_why=%s"%a2)
a3a = click.confirm("Shall we schedule a 10-minute phone call?")
ping_matomo("/feedback?a3a_can_we_call=%s"%b2l(a3a))
a3b = None
a3c = None
if a3a:
a3b = click.prompt("Phone number with country code")
ping_matomo("/feedback?a3b_phone=%s"%a3b)
a3c = click.prompt("Best time to call (include timezone)")
ping_matomo("/feedback?a3c_time=%s"%a3c)
print("Perfect! In the mean time, feel free to reach me at <EMAIL>")
else:
print("Ok. You can always reach me at <EMAIL>")
print("Thanks!")
def myreturn(df_xxx):
if df_xxx.shape[0] > 0:
return df_xxx
else:
return None # this means that the data was found in cache, but it was empty (meaning aws returned no data)
def b2l(b_in):
"""
return "T" on true and "F" on false
Instead of "True" and "False"
"""
return str(b_in)[0]
def l2s(x):
"""
[1,2,3,4,5,6,7,8] -> '1,2,...,7,8'
"""
if len(x)>5: x = x[:2] + ['...'] + x[-2:]
y = [str(z) for z in x] # convert to list of strings, eg if list of int
y = ",".join(y)
return y
def taglist2str(taglist, filter_tags):
"""
eg
taglist2str([{'Key':'app', 'Value':'isitfit'}], 'boo')
returns ""
taglist2str([{'Key':'app', 'Value':'isitfit'}], 'is')
returns "app = isitfit"
"""
if filter_tags is not None:
# filter the tag list for only those containing the filter-tags string
f_tn = filter_tags.lower()
# similar to the isitfit.mainManager.tagsContain function, but filtering the tags themselves
taglist = [x for x in taglist if (f_tn in x['Key'].lower()) or (f_tn in x['Value'].lower())]
# list to string
taglist = ["%s = %s"%(x['Key'], x['Value']) for x in taglist]
taglist = "\n".join(taglist)
return taglist
def pd_series_frozenset_union(s1, s2):
"""
Pandas doesn't have a built-in set union.
Worse, it doesn't support set as column type
Need to use frozenset
Check test for example
Ref https://python-forum.io/Thread-Error-Message-TypeError-unhashable-type-set
"""
import pandas as pd
df1=pd.DataFrame({'a1': s1})
  df2=pd.DataFrame({'a2': s2})
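  # A minimal sketch of the element-wise union (assumes both series share the
  # same index; not necessarily the original implementation):
  #   df_merged = df1.join(df2)
  #   return df_merged.apply(lambda r: r['a1'] | r['a2'], axis=1)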
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitem calls on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexErrror!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
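        # the 'me' index has the duplicated label 't' (list('rttti')), so
        # .loc['t', cols] returns a frame while the unique label 'r' returns
        # a Series (list of columns) or a scalar (single column)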
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
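        # rows where the first (NaN-containing) 'A' column is null; because the
        # index is the default RangeIndex these labels double as positions, so
        # they can be fed back into iloc to fill from the duplicate 'A' column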
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
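        # every cell of 'A' holds a 3-element ndarray; each of the four access
        # styles below should hand back that ndarray, not a scalar or Series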
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
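        # parse the two date columns in place, then derive Duration in seconds;
        # each .loc[:, (level0, level1)] assignment targets a full MultiIndex
        # column and must write the whole column length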
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
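        # take the first three rows, relabel them as 2-4, and assign back;
        # values align on the new labels so rows 2-4 receive the first three
        # rows' data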
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
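        # the assigned 1d array's length must match the selection; both cases
        # below mismatch (3 values into 4 rows, then 4 values into 3 rows) and
        # should raise ValueError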
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
        # TODO: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
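        # 'C' is not among the (duplicated) columns; ix should return the
        # 'A'/'B' selections plus a NaN-filled 'C' column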
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
        # this does not work, i.e. column 'test' is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
        # (describes the original bug) looking at df, element [0, 2] equals
        # '_'; typing df.ix[idx, 'test'] instead gives '-----', and
        # df.iloc[0, 2] again gives '_'
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
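        # rows where A == 0 get both A and B overwritten from the single
        # column D, i.e. D is broadcast across the two target columns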
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
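        # try every combination of mask index (None / 'index' / 'locs') and
        # accessor ('', '.loc', '.iloc') and compare the summed bit pattern,
        # or the raised error message, against the table above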
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
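        # only the rows outside the three listed classifications (positions 1
        # and 4) should have 'X' overwritten with their Classification value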
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
        # with a non-unique index)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
        # a non-unique index with a large selection used to trigger a memory
        # error (regression test)
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
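        # gen_test stacks l uniquely-labelled random rows on top of l2 rows of
        # ones that all share label 0, so label 0 is heavily duplicated and
        # .loc[mask] has to expand it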
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
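        # columns A-D start out as strings (E, F, G are int/float/object);
        # each block below should retype only the targeted columns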
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
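        # setting a scalar at a not-yet-present date label should enlarge the
        # frame by one row (column 'A' set, the remaining columns NaN)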
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
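        # .loc with a list-like containing labels missing from the index
        # reindexes the result, filling NaN for the labels that are absent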
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string label inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
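# Illustrative sketch (not part of the original test, assuming a pandas of
# this vintage): partial setting only enlarges when the new label is of an
# acceptable type; a string label on a DatetimeIndex is allowed and coerces
# the index dtype to object:
#   >>> df = tm.makeTimeDataFrame()
#   >>> df.loc['a', :] = df.ix[0]   # appends row 'a'; index dtype -> object
#   >>> df.loc[100, :] = df.ix[0]   # raises; a numeric label is rejected here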
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
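# Illustrative sketch (not part of the original test): label assignment on an
# empty Series enlarges it one element at a time, and mixing label types
# falls back to an object index:
#   >>> s = Series()
#   >>> s.loc['foo'] = 1    # creates the 'foo' label
#   >>> s.loc[3] = 4        # mixed labels -> object index ['foo', 3]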
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
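# Illustrative sketch (not part of the original test): assigning a Series to
# a new column of a frame that has columns but no rows adopts the Series'
# index, while the pre-existing columns are filled with NaN:
#   >>> df = DataFrame(columns=['A', 'B'])
#   >>> df[0] = Series(1, index=range(4))   # rows 0..3 appear; A and B are NaN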
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
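# Illustrative note (not part of the original test): writing through
# ``df.f.values`` mutates the underlying data directly, and the cached column
# Series is expected to reflect that change; the supported spelling for the
# same update would be
#   >>> df.loc[3, 'f'] = 2
# which goes through the normal setitem path and keeps the cache in sync.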
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
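# Illustrative sketch (not part of the original test): setting with a label
# that only looks numerically equivalent changes the index type, e.g. on an
# integer index
#   >>> s = Series(range(5))
#   >>> s.loc[0.1] = 0    # index becomes floating
#   >>> s.loc['0'] = 0    # index becomes object
# whereas setting with 0.0 keeps (or merely appends to) the integer labels.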
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
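# Illustrative note (not part of the original test): .loc/.iloc assignment
# aligns the right-hand side on both row and column labels, so a permuted
# (and here negated) RHS frame still lands in the matching cells, e.g.
#   >>> left.loc[['b', 'c', 'd'], ['joe', 'jolie']] = rhs
# gives the same result regardless of the row/column order of ``rhs``.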
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
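# Illustrative note (not part of the original test): label slicing with a
# negative step includes both endpoints and walks backwards, so for the
# indices built above
#   >>> s.loc[idx[13]:idx[9]:-1]
# is expected to equal the positional slice ``s.iloc[13:8:-1]``.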
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ['A', 'C']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, [1])
expected = [1]
self.assertEqual(result, expected)
class TestSeriesNoneCoercion(tm.TestCase):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion(tm.TestCase):
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_coercion_with_setitem_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][
0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_loc_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[start_dataframe['foo'] == start_dataframe[
'foo'][0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
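# Illustrative note (not part of the original test): iterating over ``.str``
# yields one Series per character position (like repeated ``.str.get(i)``),
# with NaN wherever a string has already been exhausted, e.g.
#   >>> it = iter(Series(['ab', 'c']).str)
#   >>> next(it)   # 0th characters: ['a', 'c']
#   >>> next(it)   # 1st characters: ['b', NaN]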
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
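# Illustrative note (not part of the original test): ``str_cat`` with no
# ``others`` collapses the array into a single string, while passing another
# array concatenates element-wise; ``sep`` goes between the pieces and
# ``na_rep`` substitutes for missing values, e.g.
#   >>> strings.str_cat(['a', 'b', np.nan], sep='-', na_rep='NA')  # 'a-b-NA'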
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
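# Illustrative note (not part of the original test): ``na=`` controls what
# missing values become in the result, ``regex=False`` switches to a literal
# substring test, and ``case=False`` makes the match case-insensitive, e.g.
#   >>> Series(['om', 'foo', np.nan]).str.contains('foo', na=False)
# returns [False, True, False] instead of propagating NaN.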
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
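# Illustrative note (not part of the original test): a callable replacement
# is invoked with the ``re`` match object, mirroring ``re.sub``, e.g.
#   >>> Series(['fooBAD']).str.replace('BAD', lambda m: m.group(0).lower())
# would yield ['foobad'] under this behaviour.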
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags cannot be combined with a compiled regex passed to
# str.replace; doing so raises a ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
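# Illustrative note (not part of the original test): ``str.match`` anchors
# the pattern at the start of each string and returns a boolean per element;
# ``na=`` fills the result for missing values, e.g.
#   >>> Series(['a1', 'b1', np.nan]).str.match('a', na=False)
# gives [True, False, False].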
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# a single named group with expand=False returns a Series named after the group.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
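# Illustrative note (not part of the original test): with ``expand=True``,
# ``extract`` always returns a DataFrame with one column per capture group
# (named after the group when a name is given), even for a single group:
#   >>> Series(['a3']).str.extract(r'(?P<letter>[a-z])', expand=True)
# yields a one-column DataFrame with column 'letter'.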
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result as the default index without a
# name; thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
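# Illustrative note (not part of the original test): ``extractall`` stacks
# one row per match under an extra 'match' index level, so selecting the
# first match recovers the shape of ``extract``:
#   >>> s.str.extractall(pattern).xs(0, level='match')
# is expected to equal ``s.str.extract(pattern, expand=True)`` when every
# subject has at least one match, as in the cases above.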
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3 (FULLWIDTH DIGIT THREE)
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
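# Note on the expectations above: both the ASCII '3' and the fullwidth '３' (U+FF13)
# are numeric and decimal, '¼' (U+00BC) and '፸' (U+1378) are numeric but not decimal,
# and '★' is neither, which is exactly the numeric_e / decimal_e pattern.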
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [ | u('c') | pandas.compat.u |
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from pathlib import Path
import pandas as pd
import re
from tabulate import tabulate
from colorama import init, Fore, Back, Style
init()
def process_all_log_files():
pathlist = list(Path(".").glob("results/**/*.log"))
rows = []
for path in pathlist:
if ".ipy" in str(path):
continue
row = process(str(path))
rows += [row]
df = | pd.DataFrame(rows) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
'''
'''
import time
import pandas as pd
import datarobot as dr
from datarobot.models.modeljob import wait_for_async_model_creation
import numpy as np
import re
import os
from datarobot.errors import JobAlreadyRequested
token_id = ""
ts_setting = {"project_name":"fake_job_posting_210123","filename":"../Data/fake_job_postings.csv", \
"project_id": "60089b3d23aace3eea1810d0","model_id":"", \
"feature_list": "Informative Features","features":[],"set":"validation" , \
"AUC":"Weighted AUC", "LogLoss":"Weighted LogLoss", \
"downsampling": 36,"holdout_pct": 20,"validation_pct":16,"target":"fraudulent" }
parameter_name = ['stop_words','stemmer','num_ngram',"use_idf","pos_tagging"]
value = [1,"porter",[1,2,3,4],1,1]
param_df = pd.DataFrame(list(zip(parameter_name, value)),
columns =['parameter_name', 'value'])
dr.Client(token=token_id, endpoint='https://app.datarobot.com/api/v2')
def check_if_number(st):
tp = re.search("\d+",st)
if tp:
return int(tp.group())
else:
return np.nan
def get_min_max_salary (text):
'''
Get the min and max from the salary_range
:param text: string
:return: the min and max of a salary_range
'''
if type(text) == str:
if re.search("\-",text):
tp = text.split("-")
min_salary = check_if_number(tp[0].strip())
max_salary = check_if_number(tp[1].strip())
return min_salary,max_salary
else:
return np.nan,np.nan
else:
return np.nan, np.nan
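# Illustrative examples (not from the original source):
#   get_min_max_salary("40000-60000") -> (40000, 60000)
#   get_min_max_salary(np.nan)        -> (nan, nan)   # non-strings and unranged values fall through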
def cleaned_location(text):
'''
Extract the country and the "country, state" pair from a location string.
:param text: string of the form "country, state, city"
:return: tuple (country, country_state); empty strings when no match is found
'''
country_state = ""
st = str(text)
if type(st) is str:
tp = re.search("[a-zA-Z]{2,}\s?\,(\s*[a-zA-Z0-9]+|\s)",st)
if tp:
country_state = tp.group().strip()
country = st.strip()[0:2]
else:
return "",""
return country,country_state
else:
return "",""
def create_binary_cat_for_education(text):
if pd.isnull(text) or pd.isna(text):
return "no"
elif text == "unspecified":
return "no"
else:
return "yes"
def PrepareDataSet():
'''
Prepare the dataset for fake_job_postings by adding new features.
:return: enriched original dataset with new features
'''
fake_jobs_df = pd.read_csv(ts_setting["filename"])
# assign via [] so the new columns are actually created (attribute assignment would not add them)
fake_jobs_df["min_salary"] = np.nan
fake_jobs_df["max_salary"] = np.nan
fake_jobs_df["salary_diff"] = np.nan
fake_jobs_df["min_salary"],fake_jobs_df["max_salary"] = zip(*fake_jobs_df["salary_range"].apply(get_min_max_salary))
fake_jobs_df["min_salary"] = pd.to_numeric(fake_jobs_df["min_salary"])
fake_jobs_df["max_salary"] = pd.to_numeric(fake_jobs_df["max_salary"])
fake_jobs_df["education_flag"] = [create_binary_cat_for_education(x) for x in fake_jobs_df["required_education"]]
fake_jobs_df["salary_range"] = fake_jobs_df.max_salary - fake_jobs_df.min_salary
fake_jobs_df["salary_diff"] = fake_jobs_df["salary_range"]/fake_jobs_df["min_salary"]
return fake_jobs_df
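# The enriched frame adds: min_salary / max_salary (parsed from salary_range),
# salary_range (recomputed as max - min), salary_diff (range relative to min_salary)
# and education_flag (yes/no depending on whether required_education is specified).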
def start_project_with_settings(fake_jobs_df):
'''
Run a project for fake_jobs_df
:param fake_jobs_df: already enriched dataset
:return: project
'''
global ts_setting
advanced_options = dr.AdvancedOptions(
response_cap=0.7,
blueprint_threshold=2,
smart_downsampled=True, majority_downsampling_rate=ts_setting["downsampling"])
partition = dr.StratifiedTVH(ts_setting["holdout_pct"],ts_setting["validation_pct"], seed=0)
pandas_dataset = dr.Dataset.create_from_in_memory_data(data_frame=fake_jobs_df.drop(columns = ["job_id"]))
project = pandas_dataset.create_project(project_name = ts_setting["project_name"])
project.set_target(target= ts_setting["target"],mode = dr.enums.AUTOPILOT_MODE.QUICK,
partitioning_method=partition,
advanced_options = advanced_options,
worker_count = -1)
project.unlock_holdout()
project.wait_for_autopilot(verbosity=dr.VERBOSITY_LEVEL.SILENT)
return project
'''
From the project, find features, DataRobot set as text features
'''
def get_text_features(project):
'''
get text features
:param project: DataRobot Project
:return: list of features of type text
'''
raw = [feat_list for feat_list in project.get_featurelists()\
if feat_list.name == ts_setting["feature_list"]][0]
text_features = [
feat
for feat in raw.features if dr.Feature.get(project.id, feat).feature_type == "Text"
]
return text_features
#Get all the models for a given text field
def get_1_model_performance(model_p,text_feature,num_modified):
'''
Extract a model metrics
:param model_p: model of interest
:param text_feature: list of features of type text
:param num_modified: number of parameters modified
:return: performance of type dict
'''
global ts_setting
performance = {}
try:
roc = model_p.get_roc_curve(ts_setting["set"])
threshold = roc.get_best_f1_threshold()
metrics = roc.estimate_threshold(threshold)
performance = {"model_id":model_p.id,"text_feature":text_feature,"AUC":model_p.metrics[ts_setting["AUC"]][ts_setting["set"]], \
"sample_pct":model_p.sample_pct,
"LogLoss":model_p.metrics[ts_setting["LogLoss"]][ts_setting["set"]],
'f1_score':metrics['f1_score'],"sample_pct":model_p.sample_pct,\
'true_negative_rate': metrics['true_negative_rate'],
'false_positive_rate':metrics['false_positive_rate'],
'true_positive_rate':metrics['true_positive_rate'],\
'positive_predictive_value':metrics['positive_predictive_value'],\
'negative_predictive_value':metrics['negative_predictive_value'],\
'threshold':metrics['threshold'],'parameters_modified': num_modified}
return performance
except:
performance = {"model_id": model_p.id, "text_feature": text_feature,
"AUC": 0, \
"sample_pct": model_p.sample_pct,
"LogLoss": 1,
'f1_score': 0, "sample_pct": model_p.sample_pct, \
'true_negative_rate': 0,
'false_positive_rate': 0,
'true_positive_rate': 0, \
'positive_predictive_value': 0, \
'negative_predictive_value': 0, \
'threshold': 0, 'parameters_modified': num_modified}
return performance
# Get all the models for a given text field.
# This function has two uses: first, to find the best Auto-Tuned model for each
# text feature, and second, to compare the best model before and after the
# pre-processing. Keep only models trained on less than 100% of the dataset.
def models_performance_for_text(text_feature,project):
'''
Extract all models built using only the given text feature.
:param text_feature: name of the text feature of interest
:param project: DataRobot project
:return: all models trained on less than 100% of the data and only on the text feature (Auto-Tuned Word N-gram)
'''
models_desc =project.get_models(
search_params={
'name': text_feature
})
df= pd.DataFrame()
for model_p in models_desc:
tmp_df = get_1_model_performance(model_p,text_feature,0)
if tmp_df:
if tmp_df["sample_pct"] < 100.00:
df = df.append(tmp_df, ignore_index=True)
return df
def get_best_models_before_text(project):
'''
Get the best model for each text feature. This function calls get_text_features and models_performance_for_text.
:param project: DataRobot project
:return: best model ids and LogLoss metrics, one per text feature
'''
text_features= get_text_features(project)
models_df = pd.DataFrame()
performance_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
import numpy as np
print("Loading resampled train data")
train_X = pd.read_csv("../input/AllData_v4_os.train")
train_X.pop("Unnamed: 0")
print("Loading resampled train labels")
train_y = pd.read_csv("../input/AllData_v4_os.label")
train_y = train_y.pop("TARGET")
print("Loading resampled validation data")
valid_X = pd.read_csv("../input/AllData_v4_os_valid.train")
valid_X.pop("Unnamed: 0")
print("Loading resampled validation labels")
valid_y = pd.read_csv("../input/AllData_v4_os_valid.label")
valid_y = valid_y.pop("TARGET")
print("Loading application test data")
test_X = pd.read_csv("../input/AllData_v4.test")
print("Replacing Inf values in valid_X")
valid_X = valid_X.replace([np.inf, -np.inf], np.nan)
print("Replacing NaN values in valid_X")
valid_X = valid_X.fillna(0)
print("Replacing Inf values in test_X")
test_X = test_X.replace([np.inf, -np.inf], np.nan)
print("Replacing NaN values in test_X")
test_X = test_X.fillna(0)
print("train_y shape: " + str(train_y.shape))
print("train_X shape: " + str(train_X.shape))
print("valid_y shape: " + str(valid_y.shape))
print("valid_X shape: " + str(valid_X.shape))
print("test_X shape: " + str(test_X.shape))
lgb_train = lgb.Dataset(train_X, train_y)
lgb_test = lgb.Dataset(valid_X)
params = {'task' :'train',
'objective' :'binary',
'learning_rate' :0.01,
'num_leaves' :10,
'max_depth' :3,
'min_data_in_leaf' :80,
'min_sum_hessian_in_leaf' :0.001,
'lambda_l1' :0.2,
'lambda_l2' :0,
'scale_pos_weight' :1,
'metric' :'auc',
'verbose' :-1}
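# Note: scale_pos_weight is left at 1, presumably because the class imbalance was
# already handled upstream by the resampled ("_os") training files loaded above;
# metric 'auc' matches the roc_auc_score used for validation below.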
print("Model training started...")
model = lgb.train(params=params,
train_set=lgb_train,
num_boost_round=10000,
verbose_eval=True)
print("Model training completed...")
model.save_model('GridSearch/AllData_v4_OS_LGBM_v2_Model')
print("Predicting validation set...")
valid_preds = model.predict(valid_X)
print("Validation set prediction completed...")
print("Predicting test set...")
test_preds = model.predict(test_X)
print("Test set prediction completed...")
auc = roc_auc_score(valid_y, valid_preds)
print("Validation AUC: " + str(auc))
valid_preds = | pd.DataFrame(valid_preds) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
def test_over_with_sorting(c, user_table_1):
df = c.sql(
"""
SELECT
user_id,
ROW_NUMBER() OVER (ORDER BY user_id, b) AS R
FROM user_table_1
"""
)
df = df.compute()
expected_df = pd.DataFrame({"user_id": user_table_1.user_id, "R": [3, 1, 2, 4]})
expected_df["R"] = expected_df["R"].astype("Int64")
assert_frame_equal(df, expected_df)
def test_over_with_partitioning(c, user_table_2):
df = c.sql(
"""
SELECT
user_id,
ROW_NUMBER() OVER (PARTITION BY c) AS R
FROM user_table_2
"""
)
df = df.compute()
expected_df = | pd.DataFrame({"user_id": user_table_2.user_id, "R": [1, 1, 1, 1]}) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/23 19:12
Desc: 东方财富网 (Eastmoney) - Data Center - Shanghai/Shenzhen-Hong Kong Stock Connect shareholdings
http://data.eastmoney.com/hsgtcg/
http://finance.eastmoney.com/news/1622,20161118685370149.html
"""
import requests
import json
import demjson
import pandas as pd
from bs4 import BeautifulSoup
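# The Chinese `indicator` values below are kept verbatim because the functions
# compare against them literally: "沪股通" = Shanghai-Hong Kong Stock Connect
# (northbound into Shanghai), "深股通" = Shenzhen Connect (northbound into Shenzhen),
# "北上" = total northbound flow, "南下" = total southbound flow.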
def stock_em_hsgt_north_net_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
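# Usage sketch (illustrative):
#   df = stock_em_hsgt_north_net_flow_in(indicator="北上")
#   # -> two-column DataFrame ["date", "value"] with the daily northbound net inflow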
def stock_em_hsgt_north_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_north_acc_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f54",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_south_net_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f2,f4,f6",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18307854355493858363_1584963487410",
"_": "1584964176697",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["sh2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["sz2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "南下":
temp_df = pd.DataFrame(data_json["data"]["n2s"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_south_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f2,f4,f6",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18307854355493858363_1584963487410",
"_": "1584964176697",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = | pd.DataFrame(data_json["data"]["sh2hk"]) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def pyscript_diseases():
# measles
measlesdf = | pd.read_csv('https://docs.google.com/spreadsheets/d/1ogMiFRnX-N4lp1cqI0N22F9K9fFVVFfCWxw4T6W2iVw/export?format=csv&id') | pandas.read_csv |
"""Internal utilties; not for external use
"""
import contextlib
import functools
import itertools
import os.path
import re
import warnings
from collections import OrderedDict
from typing import (
AbstractSet, Any, Callable, Container, Dict, Hashable, Iterable, Iterator,
Mapping, MutableMapping, MutableSet, Optional, Sequence, Tuple, TypeVar,
cast)
import numpy as np
import pandas as pd
from .pycompat import dask_array_type
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
def _check_inplace(inplace: Optional[bool], default: bool = False) -> bool:
if inplace is None:
inplace = default
else:
warnings.warn('The inplace argument has been deprecated and will be '
'removed in a future version of xarray.',
FutureWarning, stacklevel=3)
return inplace
def alias_message(old_name: str, new_name: str) -> str:
return '%s has been deprecated. Use %s instead.' % (old_name, new_name)
def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:
warnings.warn(alias_message(old_name, new_name), FutureWarning,
stacklevel=stacklevel)
def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:
assert isinstance(old_name, str)
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
wrapper.__doc__ = alias_message(old_name, obj.__name__)
return wrapper
def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:
from ..coding.cftimeindex import CFTimeIndex
if len(index) > 0 and index.dtype == 'O':
try:
return CFTimeIndex(index)
except (ImportError, TypeError):
return index
else:
return index
def safe_cast_to_index(array: Any) -> pd.Index:
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, 'to_index'):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, 'dtype') and array.dtype.kind == 'O':
kwargs['dtype'] = object
index = pd.Index(np.asarray(array), **kwargs)
return _maybe_cast_to_cftimeindex(index)
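# Example: safe_cast_to_index(np.array([1, 2, 3], dtype=object)) keeps dtype=object
# instead of letting pandas infer int64, while an existing pd.Index passes through
# unchanged (apart from the optional CFTimeIndex cast above).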
def multiindex_from_product_levels(levels: Sequence[pd.Index],
names: Optional[Sequence[str]] = None
) -> pd.MultiIndex:
"""Creating a MultiIndex from a product without refactorizing levels.
Keeping levels the same gives back the original labels when we unstack.
Parameters
----------
levels : sequence of pd.Index
Values for each MultiIndex level.
names : optional sequence of objects
Names for each level.
Returns
-------
pandas.MultiIndex
"""
if any(not isinstance(lev, pd.Index) for lev in levels):
raise TypeError('levels must be a list of pd.Index objects')
split_labels, levels = zip(*[lev.factorize() for lev in levels])
labels_mesh = np.meshgrid(*split_labels, indexing='ij')
labels = [x.ravel() for x in labels_mesh]
return pd.MultiIndex(levels, labels, sortorder=0, names=names)
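# Example sketch: multiindex_from_product_levels([pd.Index(['a', 'b']), pd.Index([1, 2])])
# yields the 4-row product ('a', 1), ('a', 2), ('b', 1), ('b', 2) while keeping the
# original level values, so unstacking later restores the original labels.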
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ is it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first: T, second: T) -> bool:
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray
"""
# TODO: refactor to avoid circular import
from . import duck_array_ops
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return duck_array_ops.array_equiv(first, second)
else:
return ((first is second) or
(first == second) or
( | pd.isnull(first) | pandas.isnull |
import gzip
import pickle5 as pickle
# import pickle
from collections import defaultdict
import numpy as np
import pandas as pd
import os
from copy import deepcopy
import datetime
import neat
from tensorflow.python.framework.ops import default_session
from scipy.optimize import curve_fit
from ongoing.prescriptors.base import BasePrescriptor, PRED_CASES_COL, CASES_COL, NPI_COLUMNS, NPI_MAX_VALUES
import ongoing.prescriptors.base as base
path = '5days-results-2d-1-hidden'
num_checkpoint = 26
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKPOINTS_PREFIX = os.path.join(ROOT_DIR, 'neat-checkpoint-')
# CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-multiobjective'.format(path))
CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-{}'.format(path, num_checkpoint))
TMP_PRED_FILE_NAME = os.path.join(ROOT_DIR, 'tmp_predictions_for_prescriptions', 'preds.csv')
TMP_PRESCRIPTION_FILE = os.path.join(ROOT_DIR, 'tmp_prescription.csv')
# Number of days the prescriptors will look at in the past.
# Larger values here may make convergence slower, but give
# prescriptors more context. The number of inputs of each neat
# network will be NB_LOOKBACK_DAYS * (NPI_COLUMNS + 1) + NPI_COLUMNS.
# The '1' is for previous case data, and the final NPI_COLUMNS
# is for IP cost information.
NB_LOOKBACK_DAYS = 21
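# For example, assuming the 12 standard OxCGRT intervention columns (the actual
# count comes from NPI_COLUMNS), each network would take
# 21 * (12 + 1) + 12 = 285 inputs with the lookback above.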
# Number of countries to use for training. Again, lower numbers
# here will make training faster, since there will be fewer
# input variables, but could potentially miss out on useful info.
NB_EVAL_COUNTRIES = 10
# Number of prescriptions to make per country.
# This can be set based on how many solutions in PRESCRIPTORS_FILE
# we want to run and on time constraints.
NB_PRESCRIPTIONS = 10
# Number of days to fix prescribed IPs before changing them.
# This could be a useful toggle for decision makers, who may not
# want to change policy every day. Increasing this value also
# can speed up the prescriptor, at the cost of potentially less
# interesting prescriptions.
ACTION_DURATION = 14
# Range of days the prescriptors will be evaluated on.
# To save time during training, this range may be significantly
# shorter than the maximum days a prescriptor can be evaluated on.
EVAL_START_DATE = '2020-08-01'
EVAL_END_DATE = '2020-08-02'
# Maximum number of generations to run (unlimited if None)
NB_GENERATIONS = 200
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
# PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}'.format(path, num_checkpoint))
PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}_short_pickle4'.format(path, num_checkpoint))
def dominates(one, other):
"""Return true if each objective of *one* is not strictly worse than
the corresponding objective of *other* and at least one objective is
strictly better.
"""
not_equal = False
for self_wvalue, other_wvalue in zip(one, other):
if self_wvalue > other_wvalue:
not_equal = True
elif self_wvalue < other_wvalue:
return False
return not_equal
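# Illustrative example: dominates((1, 2), (1, 1)) is True (one objective strictly
# better, none worse), while dominates((1, 1), (1, 1)) is False. Larger values are
# treated as better, which is why the fitness tuples built below are negated.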
def sortNondominatedNSGA2(pop_arr, k, first_front_only=False):
"""Sort the first *k* *individuals* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
individuals.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists), the first list includes
nondominated individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in pop_arr:
map_fit_ind[ind.fitness_mult].append(ind)
fits = list(map_fit_ind.keys())
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1:]:
if dominates(fit_i, fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif dominates(fit_j, fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
# Rank the next front until all individuals are sorted or
# the given number of individual are sorted.
if not first_front_only:
N = min(len(pop_arr), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
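# Usage sketch (illustrative):
#   fronts = sortNondominatedNSGA2(pop_arr, k=len(pop_arr))
#   pareto_front = fronts[0]   # nondominated individuals
# Individuals are grouped by their fitness_mult tuple, so genomes with identical
# objective values always end up in the same front.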
def assignCrowdingDist(individuals):
"""Assign a crowding distance to each individual's fitness.
It is done per front.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness_mult, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness_mult)
for i in range(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += (next[0][i] - prev[0][i]) / norm
# find max and min distance
max_val = -float("inf")
min_val = float("inf")
flag_plus_inf = False
flag_minus_inf = False
for dist in distances:
if dist != float("inf") and max_val < dist:
max_val = dist
pass
if dist != -float("inf") and min_val > dist:
min_val = dist
pass
if dist == float("inf"):
flag_plus_inf = True
elif dist == -float("inf"):
flag_minus_inf = True
pass
# set values equal to inf to be max + 0.5
# set values equal to -inf to be max - 0.5
# and rescale the rest
if flag_plus_inf:
max_val += 0.5
if flag_minus_inf:
min_val -= 0.5
for i in range(0, len(distances)):
if distances[i] == float("inf"):
distances[i] = 1.
elif distances[i] == -float("inf"):
distances[i] = 0.
else:
distances[i] = (distances[i] - min_val) / (max_val - min_val)
pass
pass
for i, dist in enumerate(distances):
individuals[i].crowding_dist = dist / 2
pass
pass
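# Note: after the rescaling above (with +/-inf boundary points mapped to the
# extremes), each crowding_dist ends up in the range [0, 0.5].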
def get_best_n_points(n, x_arr, y_arr):
# 1. fit the curve
# define the true objective function
def objective(x, a, b, c):
return a + b / (c - x)
# fit curve
popt, _ = curve_fit(objective, x_arr, y_arr)
# get coefficients
a, b, c = popt
# define a sequence of inputs between the smallest and largest known inputs
x_line = np.arange(min(x_arr), max(x_arr), 1)
# calculate the output for the range
y_line = objective(x_line, a, b, c)
# 2. find arc length
arc_len_arr = []
for pos in range(0, len(x_line) - 1):
p1 = np.array([x_line[pos], y_line[pos]])
p2 = np.array([x_line[pos + 1], y_line[pos + 1]])
arc_len_arr.append(np.linalg.norm(p2 - p1))
arc_len_arr = np.array(arc_len_arr)
# distance delta
d = sum(arc_len_arr) / (n-1)
# cumul_sum of art length
arc_len_arr_cum = np.cumsum(arc_len_arr)
# 3. choose ref. points
# positions of reference points
points_pos = [0]
for i in range(1, (n-1)):
dist = abs(arc_len_arr_cum - i * d)
points_pos.append(np.argmin(dist) + 1)
pass
points_pos.append(len(x_line) - 1)
ref_points = np.array([x_line[points_pos], y_line[points_pos]]).T
# 4. approximate ref. points
all_my_points = np.array([x_arr, y_arr]).T
chosen_points = []
for ref_point in ref_points:
dist = np.linalg.norm((all_my_points - ref_point), axis=1)
pos = np.argmin(dist)
chosen_points.append(pos)
pass
ref_points_pos = points_pos
return chosen_points
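# In short: fit y = a + b / (c - x) to the trade-off curve, walk the fitted curve
# in (n - 1) equal arc-length steps to pick n reference points, then return the
# indices of the input points closest to those references.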
class Neat(BasePrescriptor):
def __init__(self, seed=base.SEED, eval_start_date=EVAL_START_DATE, eval_end_date=EVAL_END_DATE,
nb_eval_countries=NB_EVAL_COUNTRIES, nb_lookback_days=NB_LOOKBACK_DAYS, nb_prescriptions=NB_PRESCRIPTIONS, nb_generations=NB_GENERATIONS,
action_duration=ACTION_DURATION, config_file=CONFIG_FILE, prescriptors_file=PRESCRIPTORS_FILE, hist_df=None, verbose=True):
super().__init__(seed=seed)
self.eval_start_date = pd.to_datetime(eval_start_date, format='%Y-%m-%d')
self.eval_end_date = pd.to_datetime(eval_end_date, format='%Y-%m-%d')
self.nb_eval_countries = nb_eval_countries
self.nb_lookback_days = nb_lookback_days
self.nb_prescriptions = nb_prescriptions
self.nb_generations = nb_generations
self.action_duration = action_duration
self.config_file = config_file
self.prescriptors_file = prescriptors_file
self.hist_df = hist_df
self.verbose = verbose
def fit(self, hist_df=None):
if hist_df is not None:
self.hist_df = hist_df
# As a heuristic, use the top NB_EVAL_COUNTRIES w.r.t. ConfirmedCases
# so far as the geos for evaluation.
eval_geos = list(self.hist_df.groupby('GeoID').max()['ConfirmedCases'].sort_values(
ascending=False).head(self.nb_eval_countries).index)
if self.verbose:
print("Nets will be evaluated on the following geos:", eval_geos)
# Pull out historical data for all geos
past_cases = {}
past_ips = {}
for geo in eval_geos:
geo_df = self.hist_df[self.hist_df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
# Gather values for scaling network output
ip_max_values_arr = np.array([NPI_MAX_VALUES[ip] for ip in NPI_COLUMNS])
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
self.config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
if self.verbose:
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(show_species_detail=True))
# Add statistics reporter to provide extra info about training progress.
stats = neat.StatisticsReporter()
p.add_reporter(stats)
# Add checkpointer to save population every generation and every 10 minutes.
p.add_reporter(neat.Checkpointer(generation_interval=1,
time_interval_seconds=600,
filename_prefix=CHECKPOINTS_PREFIX))
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_multy_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
if genome.fitness is not None:
continue
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = np.zeros(config.genome_config.num_outputs)
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += prescribed_ips
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Compute fitness. There are many possibilities for computing fitness and ranking
# candidates. Here each genome gets a tuple of objectives rather than a single
# scalar: the negated prescribed stringency of each NPI (summed over all geos and
# days) plus the negated mean number of predicted new cases. Negation turns
# minimization into maximization for the multi-objective (NSGA-II-style) ranking.
# Note that the stringency objectives can still favor the degenerate solution of
# all IPs 0, i.e., stringency zero; a different formulation may be required to
# achieve more interesting behavior.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
fitness_mult = list(-stringency)
fitness_mult.append(-new_cases)
genome.fitness_mult = tuple(fitness_mult)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_2d_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = | pd.DataFrame(df_dict) | pandas.DataFrame |
import datetime
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
from geopandas.testing import assert_geodataframe_equal
import trackintel as ti
from trackintel.geogr.distances import calculate_distance_matrix
@pytest.fixture
def example_staypoints():
"""Staypoints for location generation.
Staypoints have non-continuous ids and should result in noise and several locations per user.
With epsilon=10, num_samples=2:
The following staypoint ids should form a location (1, 15), (5,6), (80, 3)
The following staypoint ids should be noise (2, 7)
for agg_level="dataset"
The following staypoint ids should form a location (1, 15), (5,6, 80, 3),
The following staypoint ids should be noise (2, 7)
"""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
p4 = Point(8.5067847, 47.7)
t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
t4 = pd.Timestamp("1971-01-02 08:00:00", tz="utc")
t5 = | pd.Timestamp("1971-01-02 09:00:00", tz="utc") | pandas.Timestamp |
import io
import os
import json
import gc
import pandas as pd
import numpy as np
from datetime import date, timedelta
from fastapi import FastAPI, File, HTTPException
import lightgbm as lgb
from lightgbm import LGBMClassifier
import matplotlib.pyplot as plt
import joblib
app = FastAPI(
title="Home Credit Default Risk",
description="""Obtain information related to probability of a client defaulting on loan.""",
version="0.1.0",
)
def calculate_years(days):
"""
Method used to calculate the number of whole years elapsed since (today - given number of days).
Parameters:
-----------------
days (int): Number of days to subtract from today
Returns:
-----------------
years (int): Number of whole years elapsed
"""
today = date.today()
initial_date = today - timedelta(abs(days))
years = today.year - initial_date.year - ((today.month, today.day) < (initial_date.month, initial_date.day))
return years
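# Illustrative example (not part of the original API): calculate_years(-12000)
# returns the completed years in roughly 12000 days (about 32.9 years), i.e. 32;
# the exact value shifts with the current date.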
########################################################
# Columns to read on CSVs
########################################################
COLUMNS = [
"SK_ID_CURR", "AMT_INCOME_TOTAL", "CODE_GENDER",
"DAYS_BIRTH", "DAYS_REGISTRATION", "DAYS_EMPLOYED",
"AMT_CREDIT", "AMT_GOODS_PRICE", "EXT_SOURCE_2",
"EXT_SOURCE_3",
]
########################################################
# Reading the csv
########################################################
df_clients_to_predict = pd.read_csv("datasets/df_clients_to_predict_20220221.csv")
df_current_clients = pd.read_csv("datasets/df_current_clients_20220221.csv")
df_current_clients["AGE"] = df_current_clients["DAYS_BIRTH"].apply(lambda x: calculate_years(x))
df_current_clients["YEARS_EMPLOYED"] = df_current_clients["DAYS_EMPLOYED"].apply(lambda x: calculate_years(x))
df_current_clients["EXT_SOURCE_2"] = df_current_clients["EXT_SOURCE_2"].round(3)
df_current_clients["EXT_SOURCE_3"] = df_current_clients["EXT_SOURCE_3"].round(3)
df_current_clients_by_target_repaid = df_current_clients[df_current_clients["TARGET"] == 0]
df_current_clients_by_target_not_repaid = df_current_clients[df_current_clients["TARGET"] == 1]
@app.get("/api/clients")
async def clients_id():
"""
EndPoint to get all client ids
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
return {"clientsId": clients_id}
@app.get("/api/clients/{id}")
async def client_details(id: int):
"""
EndPoint to get a client's details
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Filtering by client's id
df_by_id = df_clients_to_predict[COLUMNS][df_clients_to_predict["SK_ID_CURR"] == id]
idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0]
# Read the single matching row directly instead of writing each column into globals().
row = df_by_id.iloc[0]
client = {
"clientId" : int(row["SK_ID_CURR"]),
"gender" : "Man" if int(row["CODE_GENDER"]) == 0 else "Woman",
"age" : calculate_years(int(row["DAYS_BIRTH"])),
"antiquity" : calculate_years(int(row["DAYS_REGISTRATION"])),
"yearsEmployed" : calculate_years(int(row["DAYS_EMPLOYED"])),
"goodsPrice" : float(row["AMT_GOODS_PRICE"]),
"credit" : float(row["AMT_CREDIT"]),
"anualIncome" : float(row["AMT_INCOME_TOTAL"]),
"source2" : float(row["EXT_SOURCE_2"]),
"source3" : float(row["EXT_SOURCE_3"]),
"shapPosition" : int(idx)
}
return client
@app.get("/api/predictions/clients/{id}")
async def predict(id: int):
"""
EndPoint to get the probability of honor/compliance (loan repayment) for a client
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Loading the model
model = joblib.load("models/model_20220220.pkl")
threshold = 0.135
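# y_prob below is the predicted probability of default (class 1). A client is
# reported as likely to repay ("Yes") only when that probability stays under
# this threshold; 0.135 is presumably tuned offline rather than the usual 0.5.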
# Filtering by client's id
df_prediction_by_id = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id]
df_prediction_by_id = df_prediction_by_id.drop(df_prediction_by_id.columns[[0, 1]], axis=1)
# Predicting
result_proba = model.predict_proba(df_prediction_by_id)
y_prob = result_proba[:, 1]
result = (y_prob >= threshold).astype(int)
if (int(result[0]) == 0):
result = "Yes"
else:
result = "No"
return {
"repay" : result,
"probability0" : result_proba[0][0],
"probability1" : result_proba[0][1],
"threshold" : threshold
}
@app.get("/api/predictions/clients/shap/{id}")
async def client_shap_df(id: int):
"""
EndPoint to return a dataframe with all of the client's data
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Filtering by client's id
idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0]
client = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id].drop(columns=["SK_ID_CURR", "AMT_INCOME_TOTAL"])
client = client.to_json(orient="records")
return client
@app.get("/api/statistics/ages")
async def statistical_age():
"""
EndPoint to get some statistics - ages
"""
ages_data_repaid = df_current_clients_by_target_repaid.groupby("AGE").size()
ages_data_repaid = pd.DataFrame(ages_data_repaid).reset_index()
ages_data_repaid.columns = ["AGE", "AMOUNT"]
ages_data_repaid = ages_data_repaid.set_index("AGE").to_dict()["AMOUNT"]
ages_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("AGE").size()
ages_data_not_repaid = pd.DataFrame(ages_data_not_repaid).reset_index()
ages_data_not_repaid.columns = ["AGE", "AMOUNT"]
ages_data_not_repaid = ages_data_not_repaid.set_index("AGE").to_dict()["AMOUNT"]
return {"ages_repaid" : ages_data_repaid, "ages_not_repaid" : ages_data_not_repaid}
@app.get("/api/statistics/yearsEmployed")
async def statistical_years_employed():
"""
EndPoint to get some statistics - years employed
"""
years_employed_data_repaid = df_current_clients_by_target_repaid.groupby("YEARS_EMPLOYED").size()
years_employed_data_repaid = pd.DataFrame(years_employed_data_repaid).reset_index()
years_employed_data_repaid.columns = ["YEARS_EMPLOYED", "AMOUNT"]
years_employed_data_repaid = years_employed_data_repaid.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"]
years_employed_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("YEARS_EMPLOYED").size()
years_employed_data_not_repaid = pd.DataFrame(years_employed_data_not_repaid).reset_index()
years_employed_data_not_repaid.columns = ["YEARS_EMPLOYED", "AMOUNT"]
years_employed_data_not_repaid = years_employed_data_not_repaid.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"]
return {
"years_employed_repaid" : years_employed_data_repaid,
"years_employed_not_repaid" : years_employed_data_not_repaid
}
@app.get("/api/statistics/amtCredits")
async def statistical_amt_credit():
"""
EndPoint to get some statistics - AMT Credit
"""
amt_credit_data_repaid = df_current_clients_by_target_repaid.groupby("AMT_CREDIT").size()
amt_credit_data_repaid = pd.DataFrame(amt_credit_data_repaid).reset_index()
amt_credit_data_repaid.columns = ["AMT_CREDIT", "AMOUNT"]
amt_credit_data_repaid = amt_credit_data_repaid.set_index("AMT_CREDIT").to_dict()["AMOUNT"]
amt_credit_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("AMT_CREDIT").size()
amt_credit_data_not_repaid = pd.DataFrame(amt_credit_data_not_repaid).reset_index()
amt_credit_data_not_repaid.columns = ["AMT_CREDIT", "AMOUNT"]
amt_credit_data_not_repaid = amt_credit_data_not_repaid.set_index("AMT_CREDIT").to_dict()["AMOUNT"]
return {
"amt_credit_repaid" : amt_credit_data_repaid,
"amt_credit_not_repaid" : amt_credit_data_not_repaid
}
@app.get("/api/statistics/amtIncomes")
async def statistical_amt_income():
"""
EndPoint to get some statistics - AMT Income
"""
amt_income_data_repaid = df_current_clients_by_target_repaid.groupby("AMT_INCOME_TOTAL").size()
amt_income_data_repaid = pd.DataFrame(amt_income_data_repaid).reset_index()
amt_income_data_repaid.columns = ["AMT_INCOME", "AMOUNT"]
amt_income_data_repaid = amt_income_data_repaid.set_index("AMT_INCOME").to_dict()["AMOUNT"]
amt_income_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("AMT_INCOME_TOTAL").size()
amt_income_data_not_repaid = pd.DataFrame(amt_income_data_not_repaid).reset_index()
amt_income_data_not_repaid.columns = ["AMT_INCOME", "AMOUNT"]
amt_income_data_not_repaid = amt_income_data_not_repaid.set_index("AMT_INCOME").to_dict()["AMOUNT"]
return {
"amt_income_repaid" : amt_income_data_repaid,
"amt_income_not_repaid" : amt_income_data_not_repaid
}
@app.get("/api/statistics/extSource2")
async def statistical_ext_source_2():
"""
EndPoint to get some statistics - EXT SOURCE 2
"""
ext_source_2_data_repaid = df_current_clients_by_target_repaid.groupby("EXT_SOURCE_2").size()
ext_source_2_data_repaid = | pd.DataFrame(ext_source_2_data_repaid) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
import seaborn as sn
from sklearn.naive_bayes import GaussianNB, MultinomialNB, CategoricalNB
from DataLoad import dataload
from Classifier.Bayes.NaiveBayes import NaiveBayes
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
# Define the model
model = DecisionTreeClassifier()
# load data
train = dataload('./train.csv')
train_data = train.get_data()
train_ordinal = train.get_ordinal_data()
train_nominal = train.get_nominal_data()
missing_ordinal = train.get_ordinal_mean()
train_label = train.get_label()
test = dataload('./test.csv', missing_ordinal)
test_data = test.get_data()
test_ordinal = test.get_ordinal_data()
test_nominal = test.get_nominal_data()
test_label = test.get_label()
# normalization
train_ordinal = (train_ordinal - train_ordinal.min())/(train_ordinal.max() - train_ordinal.min())
test_ordinal = (test_ordinal - test_ordinal.min())/(test_ordinal.max() - test_ordinal.min())
#train_ordinal = (train_ordinal - train_ordinal.min())/(train_ordinal.std())
#test_ordinal = (test_ordinal - test_ordinal.min())/(test_ordinal.std())
#train_ordinal = normalize(train_ordinal, norm = 'l1', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l1', axis = 0)
#train_ordinal = normalize(train_ordinal, norm = 'l2', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l2', axis = 0)
# feature reduction
nc = 10
pca1 = PCA(n_components=nc, svd_solver='full')
train_ordinal = pca1.fit_transform(train_ordinal)
pca2 = PCA(n_components=nc, svd_solver='full')
test_ordinal = pca2.fit_transform(test_ordinal)
# transform to pandas dataframe
train_ordinal = pd.DataFrame(train_ordinal)
test_ordinal = pd.DataFrame(test_ordinal)
print(train_ordinal)
# train and test model
scores = cross_val_score(model, train_ordinal, train_label, cv=5)
print(scores)
print("Score Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
model.fit(train_ordinal, train_label)
pred = model.predict(test_ordinal)
pd.set_option('precision', 4)
print('The accuracy is: %0.4f'%accuracy_score(test_label, pred))
classes = np.sort( | pd.unique(train_label) | pandas.unique |
import sqlite3
from sqlite3 import Error
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import folium
conn = sqlite3.connect('../data/rodents_data.db')
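# Recurrence per location: count inspections at each (latitude, longitude)
# since 2009; the count serves as a simple recurrence index for that spot.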
sql_statement = """SELECT latitude,longitude,count(inspection_date) as recurrence_index FROM 'rodent_incidents' where inspection_date > '2008-12-31' GROUP BY latitude,longitude order by inspection_date"""
overall_location_based_data = pd.read_sql_query(sql_statement, conn)
sql_statement = """SELECT * from (SELECT strftime("%Y/%m", inspection_date) as 'year_month',count(inspection_date) as incidents FROM (SELECT inspection_date FROM 'rodent_incidents' where inspection_date > '2008-12-31' order by inspection_date) GROUP BY strftime("%Y/%m", inspection_date)) where incidents > 500"""
monthly_rodent_incidents = | pd.read_sql_query(sql_statement, conn) | pandas.read_sql_query |
"""
Script to run MCCE simulation at different charges for water molecules.
"""
import os
import sys
import numpy as np
from scipy import stats
from pymcce.automated_mcce import MCCEParams
from pymcce.mcce_simulation import Simulation
from pymcce.utils import write_watpdb_from_coords, get_last_prot_at_index
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import pylab
import seaborn as sns
from sklearn.neighbors import KernelDensity
from numpy import *
import pandas as pd
sns.set(style="white")
mu = []
ab_indices = []
n_wat = []
dipole_x = []
dipole_y = []
dipole_z = []
data_dir = '/home/yzhang/Dropbox/ProtonHopping/data/gramicidin/simulations/input_struct_dp_groups_positive_t3p'
prefix = "run_restart200_000001_1_update"
print("Processing %s:" % prefix)
msdat = os.path.join(data_dir, prefix, "ms.dat")
head3lst = os.path.join(data_dir, prefix, "head3.lst")
fort38 = os.path.join(data_dir, prefix, "fort.38")
step2out = os.path.join(data_dir, prefix, "step2_out.pdb")
msa = Simulation(msdat, head3lst, fort38)
msa.parse_trajectory(sample_frequency=10)
msa.parse_struct(step2out)
conf_dipoles = msa.calculate_dipoles()
print(conf_dipoles['HOH01W0233_006'])
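# For each sampled microstate, sum the x/y/z dipole components of its
# conformers (skipping "DM" dummy conformers) to get that microstate's net
# dipole; msa.state_counts records how many times each microstate was sampled.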
numbers = zeros(msa.trajectory.shape[0], dtype="int64")
for i in range(msa.trajectory.shape[0]):
microstate_conf_ids = msa.trajectory[i, :]
numbers[i] = msa.state_counts[i]
dps = []
curr_wat_ids = []
for index, c in enumerate(microstate_conf_ids):
conf_name = msa.conf_id_name_map[c + 1]
if "DM" not in conf_name:
dpX = conf_dipoles[conf_name][0]
dpY = conf_dipoles[conf_name][1]
dpZ = conf_dipoles[conf_name][2]
dps.append([dpX, dpY, dpZ])
dps = np.array(dps)
x = sum(dps[:, 0])
dipole_x.append(x)
y = sum(dps[:, 1])
dipole_y.append(y)
z = sum(dps[:, 2])
dipole_z.append(z)
dipole_ms = | pd.DataFrame({'x': dipole_x, 'y': dipole_y, 'z': dipole_z, 'count': numbers}) | pandas.DataFrame |
# %%
import pandas as pd
import numpy as np
import time
import datetime
from datetime import datetime as dt
from datetime import timezone
from spacepy import coordinates as coord
from spacepy.time import Ticktock
from astropy.constants import R_earth
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import iplot
from kamodo_ccmc.flythrough.utils import ConvertCoord
def SatPlot4D(var,time,lon,lat,alt,vard,varu,inCoordName,inCoordType,plotCoord,groupby,model,
displayplot=True,type='3D',body='black',divfile='',htmlfile=''):
"""New 4D plotting for satellite trajectories using plotly by <NAME>
__Required variables__
var: string of variable name
time: time formatted as a timestamp in UTC
lat: latitude in deg
lon: longitude in deg
alt: altitude in km
vard: data of variable var, same size array as positions
varu: string of variable var units
inCoordName: string for incoming coordinate system. GDZ, GEO, GSM, GSE, SM, GEI, MAG, RLL
inCoordType: string for incoming coordinate type. car, sph
plotCoord: string for coordinate system used in 3D plot. Assumes cartesian type.
groupby: grouping of data for animation, values include
all, day, hour, minute, N, orbitE, orbitM
model: string of name of model the data was extracted from
__Optional variables__
displayplot: logical to show/hide displayed plot (may want false when saving htmlfile)
type: string for choice of plot type, values: 3D, 1D, 2D, 2DLT
body: string for choice of 3D inner body, values: black, earth (only GEO), none
divfile: string with filename to save a html div file of the plot
htmlfile: string with filename to save a full html file of the plot
"""
REkm = (R_earth.value/1000.)
if type == "3D":
#Convert incoming coordinates into plot coordinages (cartesian)
xx,yy,zz,units = ConvertCoord(time,lon,lat,alt,inCoordName,inCoordType,plotCoord,'car')
# Create dictionary block to pass to plotting with selected options
plot_dict=dict(
title = 'Satellite extraction from model: '+model+"<br>"+plotCoord+" coordinates", # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "",
time = dict(format='timestamp', data=time), # possible formats: datetime, timestamp (assumes UTC)
vars = dict(
x = dict(units=units[0], data=xx),
y = dict(units=units[1], data=yy),
z = dict(units=units[2], data=zz),
Lat = dict(units='deg', data=lat),
Lon = dict(units='deg', data=lon),
Alt = dict(units='km', data=alt),
),
position_variables = ["x", "y", "z"], # three variables to use for position
),
options = dict(
position_units = "R_E", # possible values: R_E, km, ""
var = var, # variable to use for colorscale
hover_vars = ["Lat", "Lon", "Alt"], # other information for hoverinfo display
quiver = False, # logical value to display or hide quivers
quiver_scale = 0.1, # length scale of quivers
quiver_skip = 0, # points to skip between displaying quivers
groupby = groupby, # possible values: all, day, hour, minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
body = body, # possible values: black, earth, and any other value is no body
colorscale = "Viridis", # named colorscale
REkm = REkm, # Earth radius in km
coord = plotCoord, # Coordinate system of plot (string such as "GEO", used for the Earth body option)
),
)
# Fixed position variables already included, now add passed in variable to dictionary
plot_dict['Sat1']['vars'][var]=dict(units=varu, data=vard)
# Execute creation and display of figure
fig=custom3Dsat(plot_dict,vbose=0)
if divfile != '':
print('-saving html div file: ',divfile)
fig.write_html(divfile,full_html=False)
if htmlfile != '':
print('-saving full html file: ',htmlfile)
fig.write_html(htmlfile,full_html=True)
if displayplot:
iplot(fig)
if type == "1D" or type == "2D" or type == "2DLT":
#Convert incoming coordinates into GDZ sph
xx,yy,zz,units = ConvertCoord(time,lon,lat,alt,inCoordName,inCoordType,'GDZ','sph')
xx[xx<0.] += 360.
# Create dictionary block to pass to plotting with selected options
plot_dict=dict(
title = 'Satellite extraction from model: '+model, # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "",
time = dict(format='timestamp', data=time), # possible formats: datetime, timestamp (assumes UTC)
vars = dict(
Lon = dict(units=units[0], data=xx),
Lat = dict(units=units[1], data=yy),
Alt = dict(units=units[2], data=zz),
),
position_variables = ["Lon", "Lat", "Alt"], # three variables to use for position
),
options = dict(
position_units = "", # possible values: R_E, km, ""
var = var, # variable to use for colorscale
hover_vars = ["Lon", "Lat", "Alt"], # other information for hoverinfo display
groupby = groupby, # possible values: all, day, hour, minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
),
)
# Fixed position variables already included, now add passed in variable to dictionary
plot_dict['Sat1']['vars'][var]=dict(units=varu, data=vard)
# Execute creation and display of figure
if type == "1D":
fig=custom1Dsat(plot_dict,vbose=0)
elif type == "2D":
fig=custom2Dsat(plot_dict,vbose=0)
elif type == "2DLT":
fig=custom2Dsat(plot_dict,useLT=True,vbose=0)
if divfile != '':
print('-saving html div file: ',divfile)
fig.write_html(divfile,full_html=False)
if htmlfile != '':
print('-saving full html file: ',htmlfile)
fig.write_html(htmlfile,full_html=True)
if displayplot:
iplot(fig)
# ===============================================================================================
# ===============================================================================================
def custom3Dsat(datad, vbose=1):
'''
This function creates a custom 3D satellite plot, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
vbose: An optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
Other
-----
The Python code block below will set up and display a working demo.
import numpy as np
import datetime
from datetime import timezone
from plotly.offline import iplot
# Build a datetime array to use in the dictionary
base = datetime.datetime(2000, 1, 1).replace(tzinfo=timezone.utc)
arr = np.array([base + datetime.timedelta(minutes=30*i) for i in range(8)])
sample=dict(
title = 'Plot Title Here', # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "Fake Satellite",
time = dict(format='datetime', data=arr), # possible formats: datetime, timestamp (assumes UTC)
vars = dict(
x = dict(units='R_E', data=np.array(np.arange(1.,9.))),
y = dict(units='R_E', data=np.array(np.arange(1.,9.))),
z = dict(units='R_E', data=np.array(np.arange(1.,9.))),
p = dict(units='nP', data=np.array(np.arange(11.,19.))),
U_x = dict(units='km/s', data=np.array(-1.*np.arange(11.,19.))),
U_y = dict(units='km/s', data=np.array(np.arange(21.,29.))),
U_z = dict(units='km/s', data=np.array(np.arange(31.,39.))),
),
position_variables = ["x", "y", "z"], # three variables to use for position
vector_variables = ["U_x", "U_y", "U_z"], # three variables to use for quiver if quiver is True
),
options = dict(
position_units = "R_E", # possible values: R_E, km, ""
var = "p", # variable to use for colorscale
hover_vars = ["U_x"], # other information for hoverinfo display
quiver = True, # logical value to display or hide quivers
quiver_scale = 0.1, # length scale of quivers
quiver_skip = 0, # points to skip between displaying quivers
groupby = "orbitM", # possible values: all, day, hour, minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
body = "black", # possible values: black, earth, and any other value is no body
colorscale = "Viridis", # named colorscale
REkm = 6.3781E3, # Earth radius in km
),
)
fig=custom3Dsat(sample)
iplot(fig)
'''
# ===============================================================================================
# Start timer
tic = time.perf_counter()
# Start with error checking ...
if 'title' not in datad:
print("Warning, no title given for plot.")
txttop = "No Title"
else:
txttop = datad['title']
if 'var' not in datad['options']:
print("ERROR, no variable selected to plot, returning.")
return None
var=datad['options']['var']
if var == "time":
varu=""
else:
varu=datad[datad['sats'][0]]['vars'][var]['units']
if 'REkm' in datad['options']:
REkm = datad['options']['REkm']
else:
REkm=6.3781E3
scale=datad['options']['position_units']
if 'groupby' in datad['options']:
groupby = datad['options']['groupby']
else:
groupby = "all"
if 'quiver' in datad['options']:
quiver=datad['options']['quiver']
else:
quiver=False
if quiver:
quiverscale=datad['options']['quiver_scale']
if scale == "km":
quiverscale=quiverscale*REkm
quiverskip=int(datad['options']['quiver_skip'])
if 'body' in datad['options']:
body=datad['options']['body']
else:
body = "none"
if 'colorscale' in datad['options']:
colorscale = datad['options']['colorscale']
else:
colorscale = "Viridis"
if 'coord' in datad['options']:
coord = datad['options']['coord']
else:
coord = ""
# set initial values used later, including loop over all sats
xmin=0.
xmax=0.
ymin=0.
ymax=0.
zmin=0.
zmax=0.
cmin= 1.e99
cmax=-1.e99
localts=dict()
localtimestring=dict()
agroup=dict()
ugroup=()
for sat in datad['sats']:
sPts=len(datad[sat]['vars'][datad[sat]['position_variables'][0]]['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['format'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['format'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].copy()
else:
print("ERROR, Unknown time format.")
return None
localtimestring[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
else:
notime=True
if var == "time":
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour min/max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
cmin=min(cmin,min(c))
cmax=max(cmax,max(c))
# Create array of possible 'groupby' value
if groupby == "day":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d") for d in localts[sat]])
elif groupby == "hour":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in localts[sat]])
elif groupby == "minute":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M") for d in localts[sat]])
elif groupby == "orbitM":
# Satellite path crosses prime meridian
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
bgroup = ['orbit'] * len(x)
j=1
for i in range(sPts):
if i != 0:
if x[i] > 0. and (y[i]*y[i-1]) < 0.:
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif groupby == "orbitE":
# Satellite path crosses equator going North
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
bgroup = ['orbit'] * len(z)
j=1
for i in range(sPts):
if i != 0:
if (z[i]>0. and z[i-1]<0.):
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif groupby.isdigit():
gb=int(groupby)
agroup[sat] = np.array(["points "+str(int(i/gb)*gb+1)+" - "+str(int(i/gb)*gb+gb) for i in range(sPts)])
else:
agroup[sat] = np.array(["all" for i in range(sPts)])
# Use the pandas unique function rather than numpy. It's faster and does not sort the results.
ugroup=pd.unique(np.append(ugroup, pd.unique(agroup[sat])))
ngroup = len(ugroup)
# Build DUMMY data block to insert as needed.
data_dict_dummy = {
"type": "scatter3d",
"name": "dummy", "x": [0.], "y": [0.], "z": [0.],
"mode": "lines", "line": {"width": 1},
"hoverinfo": "none",
}
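# Plotly animations replace the figure's base traces with each frame's traces,
# so the base data list needs a placeholder for every trace a frame will carry;
# this dummy is appended below whenever the first frame's traces are built.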
# =============================================================================================== AAA
# make figure dictionary pieces
fig_dict = {"data": [], "layout": {}, "frames": []}
fig_data_saved = {"data": []}
sliders_dict = {
"active": 0,
"yanchor": "top", "xanchor": "left",
"currentvalue": {
"prefix": "Currently showing: ",
"visible": True,
"xanchor": "left"
},
"transition": {"duration": 0},
"pad": {"b": 10, "t": 10},
"len": 0.9,
"x": 0.1,
"y": 0,
"steps": []
}
# Actual plot creation loop
for date in ugroup:
frame = {"data": [], "name": date}
for sat in datad['sats']:
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
sc=1.
if scale == "km" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "R_E":
sc=REkm
elif scale == "R_E" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "km":
sc=1./REkm
if var == "time":
c=localts[sat]
varline=""
else:
c=datad[sat]['vars'][var]['data']
varline=var+": %{marker.color:.4g} "+varu+"<br>"
if quiver:
qxvar=datad[sat]['vector_variables'][0]
qyvar=datad[sat]['vector_variables'][1]
qzvar=datad[sat]['vector_variables'][2]
qx=datad[sat]['vars'][qxvar]['data']
qy=datad[sat]['vars'][qyvar]['data']
qz=datad[sat]['vars'][qzvar]['data']
# Update position min/max values
if date == ugroup[0]:
xmin=min(xmin,min(x*sc))
xmax=max(xmax,max(x*sc))
ymin=min(ymin,min(y*sc))
ymax=max(ymax,max(y*sc))
zmin=min(zmin,min(z*sc))
zmax=max(zmax,max(z*sc))
# Compute mask to restrict all data in trace
mask = date == agroup[sat]
# Create hover information, including extras passed in. Quiver shows additional variables.
Nhv = len(datad['options']['hover_vars'])
cd=[]
cd.append(localtimestring[sat][mask])
qline=""
Ndv=1
if quiver:
cd.append(qx[mask])
cd.append(qy[mask])
cd.append(qz[mask])
qline+=qxvar+": %{customdata[1]:.2f}<br>"
qline+=qyvar+": %{customdata[2]:.2f}<br>"
qline+=qzvar+": %{customdata[3]:.2f}<br>"
Ndv+=3
for i in range(Nhv):
cd.append(datad[sat]['vars'][datad['options']['hover_vars'][i]]['data'][mask])
qline+=datad['options']['hover_vars'][i]+": %{customdata["+str(Ndv)+"]:.2f} "+\
datad[sat]['vars'][datad['options']['hover_vars'][i]]['units']+"<br>"
Ndv+=1
cd=np.asarray(cd).T
dateline="%{customdata[0]}<br>"
# Build data block with mask
data_dict = {
"type": "scatter3d",
"name": date,
"x": list(x[mask]*sc), "y": list(y[mask]*sc), "z": list(z[mask]*sc),
"mode": "markers+lines",
"marker": {
"size": 4, "cmin": cmin, "cmax": cmax, "color": list(c[mask]),
"showscale": True, "colorscale": colorscale,
"colorbar": { "title": "<b>"+var+"</b><br>["+varu+"]", "tickformat": ".3g" }
},
"line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"customdata": cd,
"hovertemplate": "<b>"+datad[sat]['display_name']+"</b>"+
"<br>X: %{x:.4f} "+scale+"<br>Y: %{y:.4f} "+scale+"<br>Z: %{z:.4f} "+scale+"<br>"+
qline+varline+dateline+"<extra></extra>",
}
# If time is colorbar variable, hide labels by selecting ticks out of range
if var == "time":
data_dict["marker"]["colorbar"]["tickvals"] = (0, 1)
# Put each part of sequence in frame data block
frame["data"].append(data_dict)
# First in sequence, put dummy in main data block
if date == ugroup[0]:
fig_dict["data"].append(data_dict_dummy)
# Start quiver
if quiver:
# Compute values to put in quiver trace
# Make array max possible size (triple len(x)), fill, and trim as needed
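# Each quiver becomes a (start point, arrow tip, None) triple; the None entries
# make plotly break the line, so e.g. xx ends up as
# [x0, x0 + scale*qx0, None, x1, x1 + scale*qx1, None, ...].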
xx=np.concatenate([x,x,x])
yy=np.concatenate([y,y,y])
zz=np.concatenate([z,z,z])
qxx=qx
qyy=qy
qzz=qz
# Build new position array, element by element
j=0
for i in range(len(mask)):
if mask[i]:
if i%(quiverskip+1) == 0:
xx[j]=x[i]*sc
yy[j]=y[i]*sc
zz[j]=z[i]*sc
xx[j+1]=x[i]*sc+quiverscale*qx[i]
yy[j+1]=y[i]*sc+quiverscale*qy[i]
zz[j+1]=z[i]*sc+quiverscale*qz[i]
xx[j+2]=None
yy[j+2]=None
zz[j+2]=None
j+=3
xx=np.array(xx[0:j], dtype=np.float64)
yy=np.array(yy[0:j], dtype=np.float64)
zz=np.array(zz[0:j], dtype=np.float64)
# Update position min/max values
xmin=min(xmin,min(xx))
xmax=max(xmax,max(xx))
ymin=min(ymin,min(yy))
ymax=max(ymax,max(yy))
zmin=min(zmin,min(zz))
zmax=max(zmax,max(zz))
# Build data block
data_dict = {
"type": "scatter3d",
"name": "positions", "x": list(xx), "y": list(yy), "z": list(zz),
"mode": "lines", "line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"hoverinfo": "none",
}
# Put each part of sequence in frame data block
frame["data"].append(data_dict)
# First in sequence, put in main data block
if date == ugroup[0]:
fig_dict["data"].append(data_dict_dummy)
fig_dict["frames"].append(frame)
slider_step = {"args": [
[date],
{"frame": {"duration": 300, "redraw": True},
"mode": "immediate",
"transition": {"duration": 0}}
],
"label": date,
"method": "animate"}
sliders_dict["steps"].append(slider_step)
# Assemble frame and slider pieces
fig_dict["layout"]["sliders"] = [sliders_dict]
# =============================================================================================== BBB
if ngroup > 1:
for sat in datad['sats']:
# Add trace if more than one group.
# This shows the whole trajectory when a subsection of data is showing.
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
sc=1.
if scale == "km" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "R_E":
sc=REkm
elif scale == "R_E" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "km":
sc=1./REkm
# Build data block
data_dict = {
"type": "scatter3d",
"name": "positions", "x": list(x*sc), "y": list(y*sc), "z": list(z*sc),
"mode": "lines", "line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"hoverinfo": "none",
}
# Put into main data block
fig_dict["data"].append(data_dict)
# =============================================================================================== CCC
ticE = time.perf_counter()
# Load points and add 1 RE sphere, padded to cover all data positions
if body == "black":
dataXYZ = pd.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereXYZ.csv')
dataIJK = pd.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereIJK.csv')
if scale == "km":
dataXYZ *= REkm
# Build data block
data_dict = {
"type": "mesh3d",
"name": '1 R_E sphere',
"x": list(np.append(dataXYZ['x'],(xmin,xmax))),
"y": list(np.append(dataXYZ['y'],(ymin,ymax))),
"z": list(np.append(dataXYZ['z'],(zmin,zmax))),
"i": list(dataIJK['i']),
"j": list(dataIJK['j']),
"k": list(dataIJK['k']),
"facecolor": list(dataIJK['c']),
"flatshading": True,
"hovertemplate": "Earth<extra></extra>",
}
# Put in main data block
fig_dict["data"].append(data_dict)
elif body == "earth" and coord == "GEO":
dataXYZ = pd.read_csv('https://ccmc.gsfc.nasa.gov/ungrouped/GM_IM/EarthXYZ.csv')
dataIJK = pd.read_csv('https://ccmc.gsfc.nasa.gov/ungrouped/GM_IM/EarthIJKRGB.csv')
if scale == "km":
dataXYZ *= REkm
color=np.array(["rgb("+str(dataIJK['r'][i])+","+str(dataIJK['g'][i])+","+str(dataIJK['b'][i])+")" \
for i in range(len(dataIJK['r']))])
# Need to reverse x,y from file to be in proper GEO coords (180 degree rotation)
xe=-dataXYZ['x']
ye=-dataXYZ['y']
ze= dataXYZ['z']
# Build data block
data_dict = {
"type": "mesh3d",
"name": '1 R_E sphere',
"x": np.append(xe,(xmin,xmax)),
"y": np.append(ye,(ymin,ymax)),
"z": np.append(ze,(zmin,zmax)),
"i": dataIJK['i'],
"j": dataIJK['j'],
"k": dataIJK['k'],
"facecolor": color,
"hovertemplate": "Earth<extra></extra>",
}
# Put in main data block
fig_dict["data"].append(data_dict)
tocE = time.perf_counter()
if vbose > 0:
print(f" -time loading Earth: {tocE - ticE:0.4f} seconds")
# =============================================================================================== DDD
# Set layout values
fig_dict["layout"]["height"] = 700
fig_dict["layout"]["width"] = 800
fig_dict["layout"]["scene_aspectmode"] = "data"
fig_dict["layout"]["scene"] = dict(xaxis=dict(title=dict(text="X ["+scale+"]")),
yaxis=dict(title=dict(text="Y ["+scale+"]")),
zaxis=dict(title=dict(text="Z ["+scale+"]")))
fig_dict["layout"]["title_text"] = txttop
fig_dict["layout"]["showlegend"] = False
fig_dict["layout"]["scene_camera"] = dict(center=dict(x=0, y=0, z=0))
fig_dict["layout"]["hoverlabel_align"] = 'right'
if ngroup > 1:
fig_dict["layout"]["updatemenus"] = [
{
"buttons": [
{
"args": [None, {"frame": {"duration": 500, "redraw": True},
"fromcurrent": True, "transition": {"duration": 0}}],
"label": "Play",
"method": "animate"
},
{
"args": [[None], {"frame": {"duration": 0, "redraw": True},
"mode": "immediate",
"transition": {"duration": 0}}],
"label": "Pause",
"method": "animate"
}
],
"direction": "left",
"pad": {"r": 10, "t": 35},
"showactive": False,
"type": "buttons",
"x": 0.1,
"xanchor": "right",
"y": 0,
"yanchor": "top"
}
]
# end timer
toc = time.perf_counter()
if vbose > 0:
print(f"Total time creating figure object: {toc - tic:0.4f} seconds")
fig3 = go.Figure(fig_dict)
return fig3
# ===============================================================================================
# ===============================================================================================
def custom1Dsat(datad, vbose=1):
"""
This function creates a custom 1D satellite plot, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
vbose: Optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
"""
# Start 1D fig
var=datad['options']['var']
varu=datad[datad['sats'][0]]['vars'][var]['units']
Nhv = len(datad['options']['hover_vars'])
localts=dict()
localtimestring=dict()
localdt=dict()
txttop = datad['title']
# For now this only makes the plot for the last sat in the dictionary.
for sat in datad['sats']:
sPts=len(datad[sat]['time']['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['format'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['format'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].copy()
else:
print("ERROR, Unknown time format.")
return None
localtimestring[sat] = np.array([datetime.datetime.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
localdt[sat] = np.array([datetime.datetime.fromtimestamp(int(d),tz=timezone.utc) for d in localts[sat]])
else:
notime=True
if var == "time":
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour min/max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
fig1 = make_subplots(rows=(Nhv+1), cols=1, shared_xaxes=True, vertical_spacing=0.04)
fig1.add_trace(go.Scatter(x=localdt[sat], y=c, name=var,
mode='lines', line= dict(shape='linear', color='black'),
hovertemplate=var+': %{y:.4g}<br>%{x}<extra></extra>',
),
row=1, col=1)
fig1.update_yaxes(title_text='<b>'+var+'</b><br>['+varu+']', exponentformat='e', row=1, col=1)
fig1.update_layout(yaxis=dict(title=dict(font=dict(size=12))))
for i in range(Nhv):
tmpv=datad['options']['hover_vars'][i]
fig1.add_trace(go.Scatter(x=localdt[sat], y=datad[sat]['vars'][tmpv]['data'], name=tmpv,
mode='lines', line= dict(shape='linear', color='black'),
hovertemplate=tmpv+': %{y:.4g}<br>%{x}<extra></extra>',
),
row=(i+2), col=1)
tmpu=""
if tmpv == "Alt":
tmpu=" [km]"
if tmpv == "Lon":
tmpu=" [deg]"
fig1.update_yaxes(tick0=0., dtick=90., row=(i+2), col=1)
if tmpv == "Lat":
tmpu=" [deg]"
fig1.update_yaxes(tick0=0., dtick=30., row=(i+2), col=1)
ya='yaxis'+str(i+2)
ys="dict(text='<b>"+tmpv+"</b>"+tmpu+"',font=dict(size=12))"
fig1['layout'][ya]['title']=eval(ys)
fig1.update_layout(height=600, width=800, title_text=txttop, showlegend = False,)
return fig1
# ===============================================================================================
# ===============================================================================================
def custom2Dsat(datad, useLT=False, vbose=1):
'''
This function creates a custom 2D satellite plot in lat/lon or lat/LT, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
useLT: Optional logical to modify plot to use local time instead of longitude on the X axis.
vbose: Optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
'''
# ===============================================================================================
# Start timer
tic = time.perf_counter()
# Start with error checking ...
if 'title' not in datad:
print("Warning, no title given for plot.")
txttop = "No Title"
else:
txttop = datad['title']
if 'var' not in datad['options']:
print("ERROR, no variable selected to plot, returning.")
return None
var=datad['options']['var']
if var == "time":
varu=""
else:
varu=datad[datad['sats'][0]]['vars'][var]['units']
if 'REkm' in datad['options']:
REkm = datad['options']['REkm']
else:
REkm=6.3781E3
scale=datad['options']['position_units']
if 'groupby' in datad['options']:
groupby = datad['options']['groupby']
else:
groupby = "all"
if 'body' in datad['options']:
body=datad['options']['body']
else:
body = "none"
if 'colorscale' in datad['options']:
colorscale = datad['options']['colorscale']
else:
colorscale = "Viridis"
# set initial values used later, including loop over all sats
cmin= 1.e99
cmax=-1.e99
localts=dict()
localtimestring=dict()
agroup=dict()
ugroup=()
for sat in datad['sats']:
sPts=len(datad[sat]['vars'][datad[sat]['position_variables'][0]]['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['format'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['format'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].copy()
else:
print("ERROR, Unknown time format.")
return None
localtimestring[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
else:
notime=True
if var == "time" or useLT:
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour min/max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
cmin=min(cmin,min(c))
cmax=max(cmax,max(c))
# Create array of possible 'groupby' value
if groupby == "day":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d") for d in localts[sat]])
elif groupby == "hour":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in localts[sat]])
elif groupby == "minute":
if notime:
print("ERROR, no time given and groupby value is",groupby)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M") for d in localts[sat]])
elif groupby == "orbitM":
# Satellite path crosses prime meridian
lon=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
bgroup = ['orbit'] * len(lon)
j=1
for i in range(sPts):
if i != 0:
if abs(lon[i]-lon[i-1]) > 180.:
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif groupby == "orbitE":
# Satellite path crosses equator going North
lat=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
bgroup = ['orbit'] * len(lat)
j=1
for i in range(sPts):
if i != 0:
if (lat[i]>0. and lat[i-1]<0.):
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif groupby.isdigit():
gb=int(groupby)
agroup[sat] = np.array(["points "+str(int(i/gb)*gb+1)+" - "+str(int(i/gb)*gb+gb) for i in range(sPts)])
else:
agroup[sat] = np.array(["all" for i in range(sPts)])
# Use the pandas unique function rather than numpy. It's faster and does not sort the results.
ugroup=pd.unique(np.append(ugroup, | pd.unique(agroup[sat]) | pandas.unique |
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
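# For each valid estimation date, re-fit a static OLS on just that window of
# data and check that it matches the corresponding row of the moving results.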
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(unittest.TestCase):
'''
For test coverage with faux data
'''
@classmethod
def setupClass(cls):
if not _have_statsmodels:
raise nose.SkipTest
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assert_(model1.r2 != model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assert_((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns = model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760","1999": "5904","2000": "4504",
"2001": "9808","2002": "4241","2003": "4086",
"2004": "4687","2005": "7686","2006": "3740",
"2007": "3075","2008": "3753","2009": "4679",
"2010": "5468","2011": "7154","2012": "4292",
"2013": "4283","2014": "4595","2015": "9194",
"2016": "4221","2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assert_(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assert_(notnull(model.beta.values).all())
self.assert_(isinstance(model, PanelOLS))
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
model = ols(y=y, x=x)
expected = ols(y=y, x={'x' : x})
assert_series_equal(model.beta, expected.beta)
def test_various_attributes(self):
# just make sure everything "works". test correctness elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
self.assert_(isinstance(value, Series))
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo' : df1, 'bar' : df2}
self.assertRaises(Exception, ols, y=y, x=data)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, intercept=False)
model.summary
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
rmodel = ols(y=y, x=x, window=10)
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'D'})
model.summary
self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
dropped_dummies={'entity' : 'E'})
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
| assert_almost_equal(exp_x_filtered, result._x_filtered.values) | pandas.util.testing.assert_almost_equal |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns= | Grouper(freq='6MS', level='foo') | pandas.Grouper |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())
]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert (np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert (np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert (not result.any())
result = isnull([u('foo'), u('bar')])
assert (not result.any())
def test_isnull_nat():
result = isnull([NaT])
exp = np.array([True])
assert (np.array_equal(result, exp))
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
assert (np.array_equal(result, exp))
def test_isnull_numpy_nat():
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert (notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert (mask[0])
assert (not mask[1:].any())
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert (mask[0])
assert (not mask[1:].any())
mask = isnull(pidx[1:])
assert (not mask.any())
class TestIsNull(tm.TestCase):
def test_0d_array(self):
self.assertTrue(isnull(np.array(np.nan)))
self.assertFalse(isnull(np.array(0.0)))
self.assertFalse(isnull(np.array(0)))
# test object dtype
self.assertTrue(isnull(np.array(np.nan, dtype=object)))
self.assertFalse(isnull(np.array(0.0, dtype=object)))
self.assertFalse(isnull(np.array(0, dtype=object)))
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
| DatetimeIndex([0, np.nan], tz='US/Eastern') | pandas.DatetimeIndex |
import unittest
import os
import shutil
import numpy as np
import pandas as pd
from aistac import ConnectorContract
from ds_discovery import Wrangle, SyntheticBuilder
from ds_discovery.intent.wrangle_intent import WrangleIntentModel
from aistac.properties.property_manager import PropertyManager
class WrangleIntentCorrelateTest(unittest.TestCase):
def setUp(self):
os.environ['HADRON_PM_PATH'] = os.path.join('work', 'config')
os.environ['HADRON_DEFAULT_PATH'] = os.path.join('work', 'data')
try:
os.makedirs(os.environ['HADRON_PM_PATH'])
os.makedirs(os.environ['HADRON_DEFAULT_PATH'])
except:
pass
PropertyManager._remove_all()
def tearDown(self):
try:
shutil.rmtree('work')
except:
pass
@property
def tools(self) -> WrangleIntentModel:
return Wrangle.scratch_pad()
def test_runs(self):
"""Basic smoke test"""
im = Wrangle.from_env('tester', default_save=False, default_save_intent=False,
reset_templates=False, has_contract=False).intent_model
self.assertTrue(WrangleIntentModel, type(im))
def test_correlate_custom(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1, 2, 3]
result = tools.correlate_custom(df, code_str="[x + 2 for x in @['A']]")
self.assertEqual([3, 4, 5], result)
result = tools.correlate_custom(df, code_str="[True if x == $v1 else False for x in @['A']]", v1=2)
self.assertEqual([False, True, False], result)
def test_correlate_choice(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [[1,2,4,6], [1], [2,4,8,1], [2,4]]
result = tools.correlate_choice(df, header='A', list_size=2)
control = [[1, 2], [1], [2, 4], [2, 4]]
self.assertEqual(control, result)
result = tools.correlate_choice(df, header='A', list_size=1)
self.assertEqual([1, 1, 2, 2], result)
def test_correlate_coefficient(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
result = tools.correlate_polynomial(df, header='A', coefficient=[2,1])
self.assertEqual([3, 4, 5], result)
result = tools.correlate_polynomial(df, header='A', coefficient=[0, 0, 1])
self.assertEqual([1, 4, 9], result)
def test_correlate_join(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
df['B'] = list('XYZ')
df['C'] = [4.2,7.1,4.1]
result = tools.correlate_join(df, header='B', action="values", sep='_')
self.assertEqual(['X_values', 'Y_values', 'Z_values'], result)
result = tools.correlate_join(df, header='A', action=tools.action2dict(method='correlate_numbers', header='C'))
self.assertEqual(['14.2', '27.1', '34.1'], result)
def test_correlate_columns(self):
tools = self.tools
df = pd.DataFrame({'A': [1,1,1,1,None], 'B': [1,None,2,3,None], 'C': [2,2,2,2,None], 'D': [5,5,5,5,None]})
result = tools.correlate_aggregate(df, headers=list('ABC'), agg='sum')
control = [4.0, 3.0, 5.0, 6.0, 0.0]
self.assertEqual(result, control)
for action in ['sum', 'prod', 'count', 'min', 'max', 'mean']:
print(action)
result = tools.correlate_aggregate(df, headers=list('ABC'), agg=action)
self.assertEqual(5, len(result))
def test_correlate_number(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,3,4.0,5,6,7,8,9,0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', precision=0)
self.assertCountEqual([1,2,3,4,5,6,7,8,9,0], result)
# Offset
df = pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset=1, precision=0)
self.assertEqual([2,3,4,5,6,7,8,9,10,1], result)
# str offset
df = pd.DataFrame(data=[1, 2, 3, 4], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset='1-@', precision=0)
self.assertEqual([0,-1,-2,-3], result)
# complex str offset
result = tools.correlate_numbers(df, 'numbers', offset='x + 2 if x <= 2 else x', precision=0)
self.assertEqual([3, 4, 3, 4], result)
# jitter
df = pd.DataFrame(data=[2] * 1000, columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0)
self.assertLessEqual(max(result), 4)
self.assertGreaterEqual(min(result), 0)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 5)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=1, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 1)
def test_correlate_normalize(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', normalize=(0, 1))
self.assertEqual([0.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 0.0], result)
result = tools.correlate_numbers(df, header='numbers', normalize=(-1, 1))
self.assertEqual([-1.0, 0, 0, 1.0, 1.0, 0, 0, -1.0], result)
def test_correlate_standardise(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', standardize=True, precision=1)
self.assertEqual([-1.4, 0.0, 0.0, 1.4, 1.4, 0.0, 0.0, -1.4], result)
def test_correlate_number_to_numeric(self):
tools = self.tools
df = pd.DataFrame(data=list("123") + ['4-5'], columns=['numbers'])
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, header='numbers')
self.assertTrue("The header column is of type" in str(context.exception))
result = tools.correlate_numbers(df, header='numbers', to_numeric=True)
self.assertEqual([1.0, 2.0, 3.0], result[:3])
result = tools.correlate_numbers(df, header='numbers', to_numeric=True, replace_nulls=0, rtn_type='int')
self.assertEqual([1, 2, 3, 0], result.to_list())
def test_correlate_number_extras(self):
tools = self.tools
# weighting
df = pd.DataFrame(columns=['numbers'], data=[2] * 1000)
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0, jitter_freq=[0, 0, 1, 1])
self.assertCountEqual([2,3,4], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0, jitter_freq=[1, 1, 0, 0])
self.assertCountEqual([0,1,2], list(pd.Series(result).value_counts().index))
# fill nan
df = pd.DataFrame(columns=['numbers'], data=[1,1,2,np.nan,3,1,np.nan,3,5,np.nan,7])
result = tools.correlate_numbers(df, 'numbers', replace_nulls=1, precision=0)
self.assertEqual([1,1,2,1,3,1,1,3,5,1,7], result)
df = pd.DataFrame(columns=['numbers'], data=[2] * 1000)
# jitter, offset and fillna
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, replace_nulls=2, precision=0)
self.assertCountEqual([2,3,4,5,6], list(pd.Series(result).value_counts().index))
# min
df = pd.DataFrame(columns=['numbers'], data=[2] * 100)
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=4, precision=0)
self.assertCountEqual([4, 5, 6], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=6, precision=0)
self.assertCountEqual([6], list(pd.Series(result).value_counts().index))
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=7, precision=0)
self.assertTrue("The min value 7 is greater than the max result value" in str(context.exception))
# max
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=4, precision=0)
self.assertCountEqual([2, 3, 4], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=2, precision=0)
self.assertCountEqual([2], list(pd.Series(result).value_counts().index))
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=1, precision=0)
self.assertTrue("The max value 1 is less than the min result value" in str(context.exception))
def test_correlate_categories(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABCDE"))
correlation = ['A', 'D']
action = {0: 'F', 1: 'G'}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=tools.action2dict(method='@header', header='cat'))
self.assertEqual(['F', 'B', 'C', 'G', 'E'], result)
correlation = ['A', 'D']
action = {0: {'method': 'get_category', 'selection': list("HIJ")}, 1: {'method': 'get_number', 'to_value': 10}}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertIn(result[0], list("HIJ"))
self.assertTrue(0 <= result[3] < 10)
df = pd.DataFrame(columns=['cat'], data=tools._get_category(selection=list("ABCDE"), size=5000))
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertEqual(5000, len(result))
def test_correlate_categories_selection(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABACDBA"))
correlation = [[tools.select2dict(column='cat', condition="@=='A'")], [tools.select2dict(column='cat', condition="@=='B'")]]
action = {0: 'F', 1: 'G'}
default = 'H'
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'G', 'F', 'H', 'H', 'G', 'F'], result)
correlation = [[tools.select2dict(column='cat', condition="@=='A'")], ['B', 'C'], 'D']
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'G', 'F', 'G', 'H', 'G', 'F'], result)
# use with numbers
df = pd.DataFrame(columns=['cat'], data=[1,2,3,4,2,1])
correlation = [[tools.select2dict(column='cat', condition="@<=2")],
[tools.select2dict(column='cat', condition="@==3")]]
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'F', 'G', 'H', 'F', 'F'], result)
def test_correlate_categories_builder(self):
builder = Wrangle.from_env('test', has_contract=False)
builder.set_persist_contract(ConnectorContract(uri="eb://synthetic_members", module_name='ds_engines.handlers.event_handlers', handler='EventPersistHandler'))
df = pd.DataFrame()
df['pcp_tax_id'] = [993406113, 133757370, 260089066, 448512481, 546434723] * 2
correlations = [993406113, 133757370, 260089066, 448512481, 546434723]
actions = {0: 'LABCORP OF AMERICA', 1: 'LPCH MEDICAL GROUP', 2: 'ST JOSEPH HERITAGE MEDICAL',
3: 'MONARCH HEALTHCARE', 4: 'PRIVIA MEICAL GROUP'}
df['pcp_name'] = builder.tools.correlate_categories(df, header='pcp_tax_id', correlations=correlations,
actions=actions, column_name='pcp_name')
result = builder.tools.run_intent_pipeline(df)
self.assertEqual((10, 2), result.shape)
def test_correlate_categories_multi(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABCDEFGH"))
df['cat'] = df['cat'].astype('category')
correlation = [list("ABC"), list("DEFGH")]
action = {0: False, 1: True}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertEqual([False, False, False, True, True, True, True, True], result)
def test_correlate_categories_nulls(self):
tools = self.tools
builder = SyntheticBuilder.from_memory().tools
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 14:09:46 2018
@author: JSong
"""
import numpy as np
import pandas as pd
from sklearn import metrics
import matplotlib.pyplot as plt
#%maplotlib inline
import seaborn as sns
#from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
#import pysnooper
__all__=['score']
# def check_array(x):
# return x
#<EMAIL>()
def score(y_true,y_pred,y_score=None,groupid=None,data=None,objective='auto',output='auto',**kwargs):
'''Compute evaluation metrics for classification (and related regression/ranking/quantile) tasks.
param::
y_true : ground-truth values
y_pred : predicted values
y_score : raw model output scores
groupid : group identifier (used for ranking tasks)
data : dataset; if the four arguments above are column names (strings), they are looked up in this DataFrame
objective : reg, binary, multi, rank or quantile
ordered : for multi-class tasks, whether the labels are ordered
categories: for multi-class tasks, the label order to use when ordered
top_k : for ranking tasks, report precision@k and similar metrics
output : 'auto'
return::
result: pd.DataFrame holding the computed metrics
crosstab: the cross-tabulation (confusion matrix)
'''
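# A minimal usage sketch (hypothetical labels, not taken from the original project):
# with plain 0/1 labels and no groupid, `objective` is inferred as 'binary' and the
# call returns the metrics DataFrame together with the confusion-matrix crosstab.
#   y_true = [0, 1, 1, 0, 1]
#   y_pred = [0, 1, 0, 0, 1]
#   result, crosstab = score(y_true, y_pred)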
from sklearn.utils.multiclass import type_of_target
output = {
'reg':['rmse','rmsle','mae','mape','r2','corrcoef']
,'binary':['precision','recall','auc','f1','acc']
,'multi':['precision','recall','auc','f1','f1_micro','f1_macro','f1_weighted','acc','acc_lr']
,'rank':['precision@k','recall@k','ndcg@k','support']
,'quantile':['alpha_quantile','rmse','mae_quantile','mape','r2']
}
# 数据源预处理
if isinstance(y_true,str) and isinstance(data,pd.DataFrame):
y_true = np.array(data[y_true])
y_pred = np.array(data[y_pred])
if isinstance(groupid,str):
groupid = data[groupid]
else:
y_true = np.array(y_true)
y_pred = np.array(y_pred)
# 模型任务判别
if objective == 'auto':
labels = set(np.unique(y_true))
if labels == set([0,1]) and groupid is None:
objective = 'binary'
elif labels == set([0,1]) and groupid is not None:
objective = 'rank'
elif type_of_target(y_true) == 'continuous':
objective = 'reg'
elif type_of_target(y_true) == 'multiclass':
objective = 'multi'
else:
objective = 'unknown'
if objective not in output:
return None,None
if objective in ['binary','multi']:
'''
For multi-class classification:
f1_micro = 2*p*r/(p+r), where p = \sum TP/(\sum TP+\sum FP) and r = \sum TP/(\sum TP+\sum FN)
f1_macro = (\sum f1) / n
f1_weighted = \sum f1 * weight
Note: f1_micro == acc (micro-averaged F1 equals accuracy for single-label problems)
'''
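# Worked illustration of the note above (hypothetical 3-class labels):
#   y_true = [0, 1, 2, 2], y_pred = [0, 2, 2, 2]
# pooled over classes this gives TP=3, FP=1, FN=1, so micro precision = recall = 3/4
# and f1_micro = 3/4, which equals plain accuracy (3 of 4 predictions correct).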
labels=sorted(np.unique(y_true))
crosstab = | pd.crosstab(y_true,y_pred) | pandas.crosstab |
import os
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, date, timedelta
import calendar
from bota import constant
import re
import discord
def findDay(date):
born = datetime.strptime(date, '%Y-%m-%d').weekday()
return (calendar.day_name[born])
class LogStat():
def __init__(self, file_path=constant.COMMAND_USER_LOG_PATH, client=None):
self.log_file_path = file_path
self.new_user_dict = {}
self.new_server_dict = {}
self.client = client
self.update_df()
self.commands = ['!top game', '!trend', '!reddit', '!protrack', '!counter', '!item', '!good', '!skill',
'!twitch', '!profile']
def update_df(self):
if os.path.exists(self.log_file_path):
self.df = self.log_to_df(self.log_file_path)
return True
else:
print("*"*80)
print(f"LOG FILE: {self.log_file_path} does not exist")
print("*" * 80)
return False
def update_new_user_and_server(self, user_id, server_id, date_time):
try:
date = datetime.date(date_time)
if user_id in self.new_user_dict:
saved_date = self.new_user_dict[user_id]
if saved_date > date:
self.new_user_dict[user_id] = date
else:
self.new_user_dict[user_id] = date
if server_id in self.new_server_dict:
saved_date = self.new_server_dict[server_id]
if saved_date > date:
self.new_server_dict[server_id] = date
else:
self.new_server_dict[server_id] = date
except Exception as e:
pass
return
def log_to_df(self, file):
with open(file) as f:
raw_string = f.readlines()
prepared = []
for line in raw_string:
try:
date_time, data = line.split('INFO')
uname, uid, is_server, sid, sname, channel, total_members, command_called, nsfw, command_passed = data.split(
',')
except Exception as e:
uname, uid, is_server, sid, sname, channel, total_members, command_called, nsfw = data.split(',')[:9]
command_passed = ','.join(data.split(',')[9:])
date_time_format = datetime.strptime(date_time.strip(),"%Y-%m-%d %H:%M:%S")
date = datetime.date(date_time_format)
time = datetime.time(date_time_format)
hour = time.replace(microsecond=0,second=0,minute=0)
self.update_new_user_and_server(uid, sid, date_time_format)
command_passed = command_passed.replace('\n', '')
weekday = findDay(date_time.split()[0])
is_weekend = True if weekday in ['Saturday', 'Sunday'] else False
prepared.append([date_time.strip(), uname.strip(), uid.strip(), is_server, sid.strip(), sname.strip(),
channel.strip(), total_members.strip(), command_called.strip(), nsfw.strip(),
command_passed.strip(), weekday, is_weekend, date, hour])
df = pd.DataFrame(prepared)
df.columns = ['date_time', 'user', 'user_id', 'is_server', 'server_id', 'server_name', 'channel', 'total_members',
'command_called', 'nsfw', 'command_passed', 'weekday', 'weekend', 'date', 'hour']
df['date_time'] = df['date_time'].astype('datetime64[ns]')
return df
def get_most_activate_user(self, top):
df = self.df
most_active = df.groupby("user")["command_called"].count().sort_values()
p = pd.DataFrame(most_active)
p = p.iloc[-(top):]
return p
def make_new_user_and_server_df(self):
# New user df
user_ids = list(self.new_user_dict.keys())
user_join_date = list(self.new_user_dict.values())
col_names = ['user_id', 'join_date']
new_user_df = pd.DataFrame(columns=col_names)
new_user_df['user_id'] = user_ids
new_user_df['join_date'] = user_join_date
# New server df
server_ids = list(self.new_server_dict.keys())
server_join_date = list(self.new_server_dict.values())
col_names = ['server_id', 'join_date']
new_server_df = pd.DataFrame(columns=col_names)
new_server_df['server_id'] = server_ids
new_server_df['join_date'] = server_join_date
return new_user_df, new_server_df
def get_new_user_and_server(self, tail=True, n=7):
summary = f"New Users and Servers in last {n} days"
new_user_df, new_server_df = self.make_new_user_and_server_df()
group_new_user_by_dates = new_user_df.groupby("join_date")["user_id"].count()
group_new_server_by_dates = new_server_df.groupby("join_date")["server_id"].count()
if tail:
new_user_series = group_new_user_by_dates.tail(n)
new_server_series = group_new_server_by_dates.tail(n)
else:
new_user_series = group_new_user_by_dates.head(n)
new_server_series = group_new_server_by_dates.head(n)
combined = pd.concat([new_user_series, new_server_series], axis=1)
ax = combined.plot.bar(figsize=(12, 6))
for p in ax.patches:
ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005))
path = os.path.join(constant.NEW_USER_SERVER_IMAGE_PATH)
plt.savefig(path)
plt.close()
plt.clf()
return constant.NEW_USER_SERVER_IMAGE_PATH, summary
def get_command_calls(self, n=7):
summary = f"Commands & Unique User calls in last {n} days"
date_calls = self.df.groupby("date")["date"].count()
date_calls = date_calls.tail(n)
temp_dates = date_calls.index._ndarray_values
temp_rows = []
for temp_date in temp_dates:
temp_ids = self.df[self.df['date'] == temp_date]
temp_ids_count = temp_ids.groupby("user_id")["user_id"].size().shape[0]
temp_rows.append(temp_ids_count)
unique_user_calls_ondate = | pd.Series(temp_rows, index=temp_dates) | pandas.Series |
import pandas
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
def evaluate_components(clf, x, y, n_iterations=500, check = 100,
evaluate = True, plot = True, thr = 0.95,
metric=None, random_state=123):
if type(x) != type(pandas.DataFrame()):
x = pandas.DataFrame(x)
# fit model
clf.fit(x,y)
n_comps = clf.n_components
# prepare output
results = pandas.DataFrame(index = range(n_comps * (n_iterations+1)),
columns = ['score', 'component', 'model'])
results.loc[:,'component'] = list(range(n_comps))*(n_iterations+1)
results.loc[range(n_comps),'model'] = ['True']*n_comps
results.loc[range(n_comps,n_comps*(n_iterations+1)), 'model'
] = ['Null']*(n_comps*n_iterations)
if not metric:
true_scores = [stats.pearsonr(clf.x_scores_[:,x], clf.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
true_scores = [metric(clf.x_scores_[:,x], clf.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[results[results.model=='True'].index,'score'] = true_scores
k = clf.n_components
# permute and refit model
rs = np.random.RandomState(random_state)
x.index = range(len(x.index))
for i in range(n_iterations):
new_ind = rs.permutation(x.index)
new_x = x.iloc[new_ind]
newmod = clf.fit(new_x,y)
if not metric:
new_scores = [stats.pearsonr(newmod.x_scores_[:,x],
newmod.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
new_scores = [metric(newmod.x_scores_[:,x], newmod.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[range(k, k+n_comps), 'score'] = new_scores
if check:
if i % check == 0:
print('finished iteration',i)
k += n_comps
if evaluate:
if plot:
cr = display_results(results, thr)
else:
cr = display_results(results, thr, False)
return results, cr
def display_results(results, thr = 0.95, plot=True):
if plot:
# plot components
sns.set_context('paper')
plt.close()
sns.catplot(x='component', y = 'score', hue='model', data=results,kind='point')
plt.show()
# get p-values
comp_results = pandas.DataFrame(index=results.component.unique(),
columns = ['r','p','sig'])
for i in results.component.unique():
nullz = results[(results.component==i) & (results.model=='Null')
]['score'].sort_values().values
real = results[(results.component==i) & (results.model=='True')]['score'].values[0]
comp_results.loc[i,'r'] = real
p = (len(nullz[nullz>real])+1) / len(nullz)
if p < (1 - thr):
comp_results.loc[i,['p','sig']] = [p, 1]
print('component %s: p = %s ***'%(i,p))
else:
comp_results.loc[i,['p','sig']] = [p, 0]
print('component %s: p = %s'%(i,p))
return comp_results
def bootstrap_features(clf, fit_model, X, y, n_iterations=500, check = 100, on ='x'):
if type(X) != type(pandas.DataFrame()):
X = pandas.DataFrame(X)
if type(y) != type(pandas.DataFrame()):
y = pandas.DataFrame(y)
# fit model
orig = fit_model
# prepare output
n_feats_x = X.shape[-1]
n_feats_y = y.shape[-1]
all_results_x = {}
all_results_y = {}
for i in range(orig.n_components):
results = pandas.DataFrame(index = range(n_iterations), columns = range(n_feats_x))
all_results_x.update({i: results})
results = pandas.DataFrame(index = range(n_iterations), columns = range(n_feats_y))
all_results_y.update({i: results})
bs_ratio_x = pandas.DataFrame(index = range(orig.n_components),
columns = range(n_feats_x))
bs_ratio_y = pandas.DataFrame(index = range(orig.n_components),
columns = range(n_feats_y))
# bootstrap
for i in range(n_iterations):
n_ind = np.random.choice(X.index, len(X.index))
n_samp = | pandas.DataFrame(X.loc[n_ind],copy=True) | pandas.DataFrame |
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' For each point in ptList1, find its nearest neighbour within ptList1 and return the mean of these minimum distances
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
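# Usage sketch for calcuNearestPtsDis2 (synthetic coordinates, not project data):
#   pts = np.array([[0, 0], [3, 4], [6, 8]])
#   calcuNearestPtsDis2(pts)  # nearest-neighbour distances are 5, 5, 5 -> returns 5.0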
def calcuNearestPtsDis(ptList1, ptList2):
''' For each point in ptList1, find its nearest point in ptList2
and return the mean of these minimum distances
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
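# Usage sketch for calcuNearestPtsDis (synthetic coordinates, not project data):
#   pts_a = np.array([[0, 0], [3, 4], [6, 8]])
#   pts_b = np.array([[0, 1], [3, 3]])
#   calcuNearestPtsDis(pts_a, pts_b)  # per-point minima 1, 1, sqrt(34) -> about 2.61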
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
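    # Illustrative call (the patient ID is a placeholder; disName values must match the
    # *_pts.csv file names / cellName_color keys below, or the special 'all_tumour'/'all_lymph' pair):
    #   drawDistancePic('positive_tumour', 'positive_lymph', '0001')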
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList3[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList3[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == 'all_tumour' and disName2 == 'all_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis4_csv = pd.read_csv(csv_dir + '/' + picID + 'other_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
ptList4 = dis4_csv.values[:,:2]
ptList6 = np.concatenate((ptList3, ptList4), axis=0)
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList6[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList6[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
for i in range(len(ptList4)):
img = cv2.circle(img, tuple(ptList4[i,:2]), 4, (255, 0, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 != disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + disName2 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 5, cellName_color[disName2], -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i, :2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList1)** 2, axis=1).astype(np.float32))
minDisInd = np.argmin(disMat)
disMat[minDisInd] = 1000.0
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList1[ptList1[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
cv2.imwrite(picID + disName1 + '_dis.png', img)
def getAllPicsDisCSV():
'''
    Get all distance data from the saved csv files (generated by the functions above).
'''
base_dir = '/data/Datasets/MediImgExp'
f = open( base_dir + '/' + 'AllDisData.csv','w',encoding='utf-8',newline="")
csv_writer = csv.writer(f)
csv_writer.writerow([ 'Ind','PosiTumourRatio','PosiLymphRatio',
'DisTumourLymph','DisPosiTumour','DisPosiLymph',
'DisPosiTumourPosiLymph','DisTumourPosiLymph'])
process_dir = base_dir + '/process'
csv_dir = base_dir + '/data_csv'
pic_name = os.listdir(process_dir)
picIDList = []
for pic_name_ in pic_name:
picIDList.append( pic_name_.split('_')[0] )
for picID in picIDList:
list_data = []
list_data.append(picID)
# PosiTumourRatio
PosiTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_tumour_pts.csv')
OtherTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'other_tumour_pts.csv')
Num_PosiTumour = PosiTumourCsv.shape[0]
Num_OtherTumour = OtherTumourCsv.shape[0]
if (Num_PosiTumour + Num_OtherTumour)!=0 :
PosiTumourRatio = Num_PosiTumour / (Num_PosiTumour + Num_OtherTumour)
else:
PosiTumourRatio = 'error'
list_data.append(PosiTumourRatio)
# PosiLymphRatio
PosiLymphCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_lymph_pts.csv')
OtherLymphCsv = pd.read_csv( csv_dir+'/'+ picID +'other_lymph_pts.csv')
Num_PosiLymph = PosiLymphCsv.shape[0]
Num_OtherLymph = OtherLymphCsv.shape[0]
if (Num_PosiLymph + Num_OtherLymph)!=0 :
PosiLymphRatio = Num_PosiLymph / (Num_PosiLymph + Num_OtherLymph)
else:
PosiLymphRatio = 'error'
list_data.append(PosiLymphRatio)
# DisTumourLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
ptList3_csv = pd.read_csv(csv_dir+'/'+ picID +'other_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList4_csv = pd.read_csv(csv_dir+'/'+ picID +'other_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList3 = ptList3_csv.values[:,:2]
ptList4 = ptList4_csv.values[:,:2]
ptList1 = np.concatenate((ptList1,ptList3), axis=0)
ptList2 = np.concatenate((ptList2,ptList4), axis=0)
DisTumourLymph = calcuNearestPtsDis(ptList1, ptList2)
list_data.append(DisTumourLymph)
# DisPosiTumour
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
DisPosiTumour = calcuNearestPtsDis2(ptList1)
list_data.append(DisPosiTumour)
# DisPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
DisPosiLymph = calcuNearestPtsDis2(ptList1)
list_data.append(DisPosiLymph)
# DisPosiTumourPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
DisPosiTumourPosiLymph = calcuNearestPtsDis(ptList1, ptList2)
list_data.append(DisPosiTumourPosiLymph)
# DisTumourPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = | pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord']) | pandas.read_csv |
from utils import load_yaml
import pandas as pd
import click
from datetime import datetime, timedelta
import numpy as np
import os
cli = click.Group()
@cli.command()
@click.option('--lan', default='en')
@click.option('--config', default="configs/configuration.yaml")
@click.option('--country_code', default=None)
def dump(lan, config, country_code):
# load the tweets of the requested language
config = load_yaml(config)[lan]
data = pd.read_csv(config['path']+"tweets_id_0.csv")
tweets = data[data.is_retweet == False]
# fetch only tweets from yesterday
tweets.set_index( | pd.to_datetime(tweets.created_at, format='%a %b %d %H:%M:%S +0000 %Y') | pandas.to_datetime |
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.fixture
def dataframe_with_duplicate_index():
"""Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
data = [['a', 'd', 'e', 'c', 'f', 'b'],
[1, 4, 5, 3, 6, 2],
[1, 4, 5, 3, 6, 2]]
index = ['h1', 'h3', 'h5']
columns = MultiIndex(
levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']],
codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
names=['main', 'sub'])
return DataFrame(data, index=index, columns=columns)
@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x],
lambda s, x: s.loc[:, x],
lambda s, x: s.xs(x, level=1)])
@pytest.mark.parametrize('level1_value, expected', [
(0, Series([1], index=[0])),
(1, Series([2, 3], index=[1, 2]))
])
def test_series_getitem_multiindex(access_method, level1_value, expected):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = access_method(s, level1_value)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('level0_value', ['D', 'A'])
def test_getitem_duplicates_multiindex(level0_value):
# GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[[level0_value, 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
# confirm indexing on missing value raises KeyError
if level0_value != 'A':
msg = "'A'"
with pytest.raises(KeyError, match=msg):
df.val['A']
msg = "'X'"
with pytest.raises(KeyError, match=msg):
df.val['X']
result = df.val[level0_value]
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
([], False, None), # empty ok
(['A'], False, None),
(['A', 'D'], False, None),
(['D'], False, r"\['D'\] not in index"), # not any values found
(pd.IndexSlice[:, ['foo']], True, None),
(pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
expected_error):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
if indexer == []:
expected = s.iloc[[]]
elif is_level1:
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
else:
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
if expected_error is not None:
with pytest.raises(KeyError, match=expected_error):
s.loc[indexer]
else:
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
([], slice(None)),
(['foo'], [])
])
def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
expected = np.mean
assert result == expected
def test_getitem_simple(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data.T
expected = df.values[:, 0]
result = df['foo', 'one'].values
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('indexer,msg', [
(lambda df: df[('foo', 'four')], r"\('foo', 'four'\)"),
(lambda df: df['foobar'], "'foobar'")
])
def test_getitem_simple_key_error(
multiindex_dataframe_random_data, indexer, msg):
df = multiindex_dataframe_random_data.T
with pytest.raises(KeyError, match=msg):
indexer(df)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3],
lambda s: s.loc[2000, 3]
])
def test_series_getitem(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3, 10],
lambda s: s.loc[2000, 3, 10]
])
def test_series_getitem_returns_scalar(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.iloc[49]
result = indexer(s)
assert result == expected
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
@pytest.mark.parametrize('indexer', [
lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
])
def test_series_getitem_fancy(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[49:51])
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,error,msg', [
(lambda s: s.__getitem__((2000, 3, 4)), KeyError, '356'),
(lambda s: s[(2000, 3, 4)], KeyError, '356'),
(lambda s: s.loc[(2000, 3, 4)], IndexingError, 'Too many indexers'),
(lambda s: s.__getitem__(len(s)), IndexError, 'index out of bounds'),
(lambda s: s[len(s)], IndexError, 'index out of bounds'),
(lambda s: s.iloc[len(s)], IndexError,
'single positional indexer is out-of-bounds')
])
def test_series_getitem_indexing_errors(
multiindex_year_month_day_dataframe_random_data, indexer, error, msg):
s = multiindex_year_month_day_dataframe_random_data['A']
with pytest.raises(error, match=msg):
indexer(s)
def test_series_getitem_corner_generator(
multiindex_year_month_day_dataframe_random_data):
s = multiindex_year_month_day_dataframe_random_data['A']
result = s[(x > 0 for x in s)]
expected = s[s > 0]
tm.assert_series_equal(result, expected)
def test_frame_getitem_multicolumn_empty_level():
df = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
df.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = df['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=df.index,
columns=['level3 item1'])
tm.assert_frame_equal(result, expected)
def test_getitem_tuple_plus_slice():
# GH 671
df = DataFrame({'a': np.arange(10),
'b': np.arange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)}
).set_index(['a', 'b'])
expected = df.loc[0, 0]
result = df.loc[(0, 0), :]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,expected_slice', [
(lambda df: df['foo'], slice(3)),
(lambda df: df['bar'], slice(3, 5)),
(lambda df: df.loc[:, 'bar'], slice(3, 5))
])
def test_getitem_toplevel(
multiindex_dataframe_random_data, indexer, expected_slice):
df = multiindex_dataframe_random_data.T
expected = df.reindex(columns=df.columns[expected_slice])
expected.columns = expected.columns.droplevel(0)
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_getitem_int(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
result = df.loc[1]
expected = df[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
def test_getitem_int_raises_exception(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
msg = "3"
with pytest.raises(KeyError, match=msg):
df.loc.__getitem__(3)
def test_getitem_iloc(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.iloc[2]
expected = df.xs(df.index[2])
tm.assert_series_equal(result, expected)
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
# really a no-no
df = multiindex_dataframe_random_data.T
df['foo'].values[:] = 0
assert (df['foo'].values == 0).all()
def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df['foo']['one'] = 2
def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df['foo']['one'] = 2
result = df
tm.assert_frame_equal(result, expected)
def test_getitem_lowerdim_corner(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
# test setup - check key not in dataframe
with pytest.raises(KeyError, match="11"):
df.loc[('bar', 'three'), 'B']
# in theory should be inserting in a sorted space????
df.loc[('bar', 'three'), 'B'] = 0
expected = 0
result = df.sort_index().loc[('bar', 'three'), 'B']
assert result == expected
@pytest.mark.parametrize('unicode_strings', [True, False])
def test_mixed_depth_get(unicode_strings):
# If unicode_strings is True, the column labels in dataframe
# construction will use unicode strings in Python 2 (pull request
# #17099).
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
if unicode_strings:
arrays = [[u(s) for s in arr] for arr in arrays]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', ''].rename('a')
tm.assert_series_equal(result, expected)
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
expected = expected.rename(('routine1', 'result1'))
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import os, re, subprocess, matplotlib, seaborn, pandas
from . import utils, FEATURETABLE, GENOME, CODONTABLE, TYPEPOS, SEQTYPES
from time import time
from Bio import SeqIO, AlignIO
def rm_genome_w_stopm(vtab):
"""Return a dataframe of genomes with nonsense variants given a
dataframe of genomes with their shared & unique variants."""
# dict of genome with their list of variants
vtab['unique'] = vtab['unique'].fillna('')
genome_vrs = vtab[['shared','unique']].apply(axis=1,
func=lambda x: ','.join(x.astype(str))).apply( lambda x: x.split(','))
# identify genomes w/ stop mutations
genome_stopm = genome_vrs.apply(utils.get_stopm)
genome_stopm = genome_stopm[genome_stopm!='']
return genome_stopm
def extract_nmsa_prots(fmsa,dr):
"""
Extract nucleotide MSA of proteins from Reference-limited Whole-genome Multiple Sequence Alignment.
Arguments:
- fmsa - path to the reference-limited Whole-genome Multiple Sequence Alignment
- dr - path to the output directory of extracted MSAs
"""
# biopython multiple sequence alignment
msa = AlignIO.read(fmsa, 'fasta')
# proteins' end points
prot_ends = FEATURETABLE[ ['feature', 'start','end','name']]
# apply function to extract CDS MSA
out = prot_ends.apply( axis=1, func=lambda x:
utils.extract_cds_msa(p=x.name,pstart=x.start,pstop=x.end,msa=msa))
# for every item
for k,v in out.items():
# prepare a file path for the output
fout = os.path.join( dr, prot_ends.loc[k,'name']+'.msa')
# write the MSA to this path
AlignIO.write(alignments=[v], handle=fout, format='fasta')
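# Illustrative call (paths are placeholders): extract one nucleotide MSA per
# annotated protein from a reference-limited whole-genome alignment.
#   extract_nmsa_prots('alignments/ref_limited_wga.fasta', 'cds_msas/')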
def run_fubar(fmsa,ftree,outdr,prog):
"""
Run positive selection analysis using FUBAR from Hyphy.
Arguments:
- fmsa - full path to input MSA file
- ftree - full path to input tree file based on the above MSA
- outdr - full path to the directory to store FUBAR output
- prog - hyphy program, full path may be required
"""
## checks
# msa file is present
if not os.path.exists(fmsa):
raise FileNotFoundError('msa file %s must be present.'%fmsa)
# tree file is present
if not os.path.exists(ftree):
raise FileNotFoundError('tree file %s must be present.'%fmsa)
# program is available
cmd = [prog,'-h']
try:
runstat = subprocess.run(cmd,stdout=subprocess.DEVNULL)
except FileNotFoundError:
raise FileNotFoundError("couldn't find the HYPHY program. You may want to check the path.")
# if output file is already present
indr = os.path.dirname(fmsa)
f = os.path.basename(fmsa)
p = f.replace('.msa','')
fsout = os.path.join( outdr, p+'.out')
# json output intermediate file
fjout_int = os.path.join( indr, f +'.FUBAR.json')
# json output file
fjout = os.path.join( outdr, p+'.json')
# cache file
fcout = os.path.join( outdr, p+'.cache')
cmd = [ prog, 'fubar', '--alignment', fmsa, '--tree', ftree, '--cache', fcout]
print("Command: %s"%' '.join(cmd))
with open(fsout,'w') as flob:
s = subprocess.run(cmd, stdout=flob)
if s.returncode == 0:
os.rename(src=fjout_int, dst=fjout)
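# Illustrative call (paths and the hyphy binary name are placeholders):
#   run_fubar('msa/S.msa', 'trees/S.nwk', 'fubar_results', 'hyphy')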
def parse_fubar(indr,frout,fsout):
"""
Parse FUBAR output to generate
- table of dN/dS rates of proteins
- table of protein sites under positive selection.
Arguments:
- indr - full path to directory with the results of FUBAR analysis
- frout - full path to output rates file
- fsout - full path to output sites file
"""
## checks
if not os.path.exists(indr):
raise IOError("Couldn't find the input directory %s."%indr)
# initialize lists for rates ans sites output tables
rates_out = []
sites_out = []
# list of FUBAR output files, to be used as input here
infls = [ i for i in os.listdir(indr) if i.endswith('.out')]
# for each of these files
for f in infls:
# protein name
p = f.replace('.out', '')
# full path to the file
fin = os.path.join( indr, f)
# extract file's contents
with open(fin) as flob:
contents = [ i.strip('\n') for i in flob]
# tree length
tln = [ i for i in contents if 'Tree length' in i]
# skip the protein, if no change, as estimated by tree length
if len(tln) == 0:
            print('\t"Tree length" entry missing for %s! Check the FUBAR output file for details.'%p)
continue
else:
tln = tln[0]
# extract tree length from the string
        t = float(re.split(r':\W+', tln)[1])
# lines with syn and non-syn rates
rate_lns = [ i for i in contents if 'synonymous' in i]
# extract the rates from the lines above and make an entry in the output list
rates = [p,t] + [ float(i.split(' = ')[1]) for i in rate_lns]
rates_out.append(rates)
# extract the table of positively selected sites
        sites_tabs = [ re.split(r'\W*\|\W*', i) for i in contents if '|' in i][2:]
# process the table and make an entry in the output list
sites_ls = [ [ p, int(i[1]), float(i[3]), float(i[4]), float(i[5].split(' = ')[1]) ]\
for i in sites_tabs]
sites_out.extend(sites_ls)
# further process rates output table
rates_out = [ i + [ round(i[-1]-i[-2],3)] for i in rates_out]
rates_out = sorted( rates_out, key=lambda x: x[-1], reverse=True)
# convert both to pandas dataframe
rates_out = pandas.DataFrame(rates_out,columns=['protein', 'exp_subs','syn', 'nonsyn', 'dnds'])
sites_out = | pandas.DataFrame(sites_out,columns=['protein','site','syn', 'nonsyn', 'post_prob']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
from bokeh.palettes import Spectral5, Spectral11
from bokeh.driving import count
# Set up widgets
area = Slider(title="A (area)", value=0.65, start=0.05, end=2, step=0.1)
branches = Slider(title="B (branches)", value=0.73,
start=0.1, end=2, step=0.01)
carrying_cap = Slider(title="K (carrying capacity)",
value=10, start=5, end=20, step=0.1)
bug_birth_rate = Slider(title="R (birth rate)", value=0.2,
start=0.2, end=0.7, step=0.005)
initial_pop = Slider(title="Initial Pop.", value=5, start=1, end=100, step=1)
params1 = [initial_pop]
params2 = [area, branches]
params3 = [carrying_cap, bug_birth_rate]
source = ColumnDataSource(data=dict())
rk_source = ColumnDataSource(data=dict())
bifurc_source = ColumnDataSource(data=dict())
bifurc_source1 = ColumnDataSource(data=dict())
surface_source = ColumnDataSource(data=dict())
def predation(u):
return u ** 2 / (1 + u ** 2)
def population(u):
R = bug_birth_rate.value * area.value / branches.value
K = carrying_cap.value / area.value
return R * u * (1 - u / K) - predation(u)
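# Note: population() is the dimensionless budworm-style balance of logistic growth
# against a saturating (type III) predation term,
#   du/dtau = R*u*(1 - u/K) - u**2/(1 + u**2),
# with R and K rescaled by the area and branches slider values above.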
def create_plot(title, x, y_series, x_label, y_label, source, circle_series):
plot = figure(plot_height=300, plot_width=300, title=title,
tools="crosshair,pan,reset,save,wheel_zoom,box_zoom", output_backend="webgl")
i = 0
palette = Spectral5
if len(y_series) > 5:
palette = Spectral11
for series in y_series:
plot.line(x, series, source=source, line_width=3,
line_alpha=0.6, color=palette[i])
i += 1
if circle_series is True:
plot.circle('k_pt', 'r_pt', source=bifurc_source1,
size=10, color="navy", alpha=0.5)
plot.xaxis.axis_label = x_label
plot.yaxis.axis_label = y_label
return plot
def calc_r(u):
return 2 * u ** 3 / (1 + u ** 2) ** 2
def calc_k(u):
return 2 * u ** 3 / (u ** 2 - 1)
def calculate_N(n_start, T):
N = [n_start]
for s in T[1:]:
start_pop = N[-1]
n_dot = population(start_pop)
N += [start_pop + n_dot]
return np.array(N)
def update_data(attrname, old, new):
# Get the current slider values
A = area.value
B = branches.value
R = bug_birth_rate.value * A / B
K = carrying_cap.value / A
N_init = initial_pop.value
# Generate the new curve
new_df = pd.DataFrame()
new_df['t'] = df['t'].copy()
N = calculate_N(N_init, new_df['t'])
new_df['N_dot'] = population(N)
U = np.linspace(0, 20, len(df['t'])) # N / A
new_df['u'] = U
# updates for the first state space
new_df['tau'] = B * df['t'] / A
new_df['r'] = calc_r(U)
new_df['k'] = calc_k(U)
new_df['f(u)_RHS'] = U / (1 + U ** 2)
new_df['f(u)_LHS'] = R * (1 - U / K)
    # rk_df['t'] = df['t'].copy()
    new_df = new_df[new_df['u'] > 1.0]
source.data = source.from_df(new_df)
rk_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 11:30:32 2018
@author: jkp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data= | pd.read_csv("/home/sysadm/Desktop/JKP BSPro/Used_startup_funding.csv") | pandas.read_csv |
import matplotlib.pyplot as plt
import numpy as np
import os,glob,sys,importlib,pickle#,scipy,coolbox,pybedtools,
# from tqdm import tqdm
from scipy.stats import rankdata
import pandas as pd
import networkx as nx
import seaborn as sns
from joblib import delayed, wrap_non_picklable_objects
from pathlib import Path
import plotly
from numba import jit
from joblib import Parallel
import sklearn.utils as sku
import plotly.graph_objects as go
import plotly.express as px
# j=sys.argv[1]
from urllib import request
import xml.etree.ElementTree as ET
import urllib
sys.path.insert(1, './nestedness_analysis/')
import nestedness_metrics_other_functions
from nestedness_metrics_other_functions import from_edges_to_matrix
# importlib.reload(sys.modules['EO_functions_bipartite'])
import extremal_bi
@delayed
@wrap_non_picklable_objects
def bip(cc,net,ff,C,patt):
# print(net)
# dd=cc[['spec','gene',net]]
dd=pd.read_csv('data/gcn/cc_'+patt+'.txt',index_col=False,sep='\t',usecols=['spec','gene',net])
# try:
dd=dd[dd[net]!=0]
# except:
# pass
# ee=nx.from_pandas_edgelist(dd,source='spec',target='gene')
# remove = [node for node,degree in dict(ee.degree()).items() if degree <5]
# ee.remove_nodes_from(remove)
# ff.append(ee)
B = nx.Graph()
B.add_nodes_from(dd['spec'], bipartite=0)
B.add_nodes_from(dd['gene'], bipartite=1)
B.add_weighted_edges_from(tuple(dd[['spec','gene',net]].itertuples(index=False, name=None)))
remove = [node for node,degree in dict(B.degree()).items() if degree <5]
B.remove_nodes_from(remove)
# C.append(B)
xx=nx.from_pandas_edgelist(dd,source='spec',target='gene',edge_attr=net)
remove = [node for node,degree in dict(xx.degree()).items() if degree <5]
xx.remove_nodes_from(remove)
# with open('data/gcn/NX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
# pickle.dump(ff, f)
# with open('data/gcn/BX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
# pickle.dump(C, f)
return xx,B
def load_list_of_dicts(filename, create_using=nx.Graph):
with open(filename, 'rb') as f:
list_of_dicts = pickle.load(f)
graphs = [create_using(graph) for graph in list_of_dicts]
return graphs
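# Illustrative usage (the pickle path mirrors the ones written above and is a
# placeholder; the file is assumed to hold a list of graphs / adjacency dicts):
#   graphs = load_list_of_dicts('data/gcn/NX_<patt>_hypert.pkl')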
# @delayed
# @wrap_non_picklable_objects
def meas(measur,uni_bact,relgene,graphs,patt):
HTXX=uni_bact[uni_bact.index.isin(relgene.columns[1:-2].str.split('-').str[0])]
HTXX['index']=np.arange(len(HTXX))
# measur=eval(measur)
S = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']==0]['index'].values]
T = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']!=0]['index'].values]
if measur!='nx.degree':
non=pd.DataFrame(S).melt()
yes=pd.DataFrame(T).melt()
elif measur=='nx.degree':
non=pd.DataFrame(S.pop())
non=non.rename(columns={0:'variable',1:'value'})
yes=pd.DataFrame(T.pop())
yes=yes.rename(columns={0:'variable',1:'value'})
non['type']='NoHT'
non.dropna(inplace=True)
non=non[non.value!=0]
non=non[~non['variable'].str.contains('UniRef90')]
non.value=non.value/np.sum(non.value)
yes['type']='HT'
yes.dropna(inplace=True)
yes=yes[yes.value!=0]
yes=yes[~yes['variable'].str.contains('UniRef90')]
yes.value=yes.value/np.sum(yes.value)
df=non.append(yes)
# df=df.dropna()
df['gen']=df.variable.str.split('_').str[2]
df.to_csv("data/gcn/"+patt+"_"+str(measur)+".txt",sep='\t')
plt.figure(figsize=(10,30))
sns.set_theme(style="whitegrid")
sns.violinplot(data=df, y="gen", x="value",hue="type",
split=True, inner="quart", linewidth=1,
orient="h")
sns.despine(left=True)
plt.savefig("data/gcn/"+patt+"_"+str(measur)+"_violin.png",dpi=300,bbox_inches = "tight")
return df
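# Illustrative call (objects come from the rest of this module; 'hypert' is a
# placeholder pattern, and any networkx measure returning a per-node dict works):
#   df = meas('nx.closeness_centrality', uni_bact, relgene, graphs, 'hypert')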
def time_bar(data,XX,rank='rank',species='all'):
if rank=='rank':
data['rank']=rankdata(data.value,method='min')
elif rank=='rank_diff' or rank=='diff':
data['vx']=rankdata(data.value_x,method='min')
data['vy']=rankdata(data.value_y,method='min')
data['rank_diff']=data['vx'].astype('int')-data['vy'].astype('int')
data['diff']=data['value_x']-data['value_y']
# elif rank=='value':
# rank=data.value
if species!='all':
data=data[data['species']==species]
# clust = ll.groupby(['species','target','time'], as_index=False)['diff'].sum()
df = data[['species','target','time',rank]]#.sort_values(['time'], ascending=[True]).groupby(['species','time']).max(5)
jeff=pd.DataFrame(df.groupby(['species','time'])[rank].nlargest(XX))
jeff.reset_index(inplace=True)
for cc in np.unique(jeff.species):
jeff2=jeff[jeff['species']==cc]
if species!='all':
jeff2=df.loc[jeff2['level_2']]
else:
jeff2=df.iloc[jeff2['level_2']]
plt.figure(figsize=(15,5))
ax = sns.histplot(jeff2, x='time', hue='target', weights=rank,
multiple='stack', palette='icefire', shrink=0.6,bins=len(pd.unique(jeff2.time))+5)
ax.set_ylabel(str(rank)+'_HT')
ax.set_title(cc)
# Fix the legend so it's not on top of the bars.
# legend = ax.get_legend()
plt.legend([],[], frameon=False)
Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
plt.savefig("data/gcn/img/"+cc+"/"+str(data)+"_"+cc+"_"+str(rank)+".png",dpi=300,bbox_inches = "tight")
def proc_dat(noHT):
# noHT=jj.filter(regex=str(focus)).dropna(how='all')
noHT.columns=noHT.columns.str.split('_').str[0]
noHT=noHT.groupby(by=noHT.columns, axis=1).mean()
noHT=noHT.dropna(how='any')
noHT.reset_index(inplace=True)
jj=noHT.melt(['source','target'])
jj.rename(columns={'variable':'time'},inplace=True)
jj['t']=jj['time']
# jj['time']=jj['time'].astype('int')+2000
# jj['time'] = pd.to_datetime(jj['time'], format='%Y')
# jj=jj[jj['value']>5]
jj['species']=jj['source'].str.split('_').str[2]
jj=jj.dropna(how='any')
return jj
# @delayed
# @wrap_non_picklable_objects
def rev_tbar(jj,XX,gg,species='all'):
data=jj[['species','target','time','t','value']]
# df=data.copy()
# data.reset_index(inplace=True)
data['sum']=pd.DataFrame(data.groupby(['species','t','target'])['value'].transform('sum'))
# jeff.reset_index(inplace=True)
del data['value']
data.drop_duplicates(inplace=True)
data.reset_index(inplace=True)
del data['index'],data['time']
jeff=pd.DataFrame(data.groupby(['species','t'])['sum'].nlargest(XX))
jeff.reset_index(inplace=True)
jeffA=data.iloc[jeff['level_2']]
tim_len=len(np.unique(jeffA['t']))
if species!='all':
jeff=jeff[jeff['species']==species]
JJ=pd.DataFrame()
rr=[]
for q,ee in enumerate((np.unique(jeff.species))):
jeff2=jeffA[jeffA['species']==ee]#.explode('target')
dd=pd.DataFrame(jeff2['target'].to_numpy().reshape(int(len(jeff2)/tim_len),tim_len,order='F'))
if len(dd.melt())==(tim_len*XX):
JJ=JJ.append(dd)
rr=np.append(rr, ee)
jeffA=jeffA.sort_values(['species', 't'], ascending=[True, True])
labels,levels=pd.factorize(sku.shuffle(JJ.melt()['value']))
cc=pd.DataFrame(np.array(labels).reshape((XX)*len(rr),tim_len,order='F'))
for i in np.arange(0,len(cc),XX+1):
for col in cc:
cc.iloc[i:i+XX,col] = cc.iloc[i:i+XX,col].sort_values(ignore_index=True)
cc.loc[i+XX]=0
plt.figure(figsize=(10,30))
ax=sns.heatmap(cc,cmap='rocket_r',annot=True, fmt="d",cbar=False,xticklabels=False,
yticklabels=False).set(ylabel=' - '.join(rr))
# plt.show()
data.to_csv('data/gcn/'+str(gg)+'.csv',sep='\t')
# Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
plt.savefig("data/gcn/img/full_"+str(gg)+"_10.png",dpi=300,bbox_inches = "tight")
def group_time_plot(noHT,steps,XX,spec_spec):
noHT.columns=noHT.columns.str.split('_').str[0]
noHT.columns=pd.qcut((noHT.columns).astype('int'), steps, labels=False)
noHT=noHT.groupby(by=noHT.columns, axis=1).mean()
noHT=noHT.dropna(how='all')
noHT.reset_index(inplace=True)
jj=noHT.melt(['source','target'])
jj.rename(columns={'variable':'time'},inplace=True)
jj['t']=jj['time']
# jj['time']=jj['time'].astype('int')+2000
# jj['time'] = pd.to_datetime(jj['time'], format='%Y')
# jj=jj[jj['value']>5]
jj['species']=jj['source'].str.split('_').str[2]
jj=jj.dropna(how='any')
jj['rank']=rankdata(jj.value,method='min')
XX=50 #10
# df = noHT[['species','target','time','rank']]
del jj['value'], jj['t'], jj['source']
if spec_spec=='1':
        jeff=pd.DataFrame(jj.groupby(['species','time'])['rank'].nlargest(XX))
jeff=jeff.dropna(how='any')
jeff.reset_index(inplace=True)
jeff2=jj.loc[jeff['level_2']]
else:
jeff=pd.DataFrame(jj.groupby(['time'])['rank'].nlargest(XX))
jeff=jeff.dropna(how='any')
jeff.reset_index(inplace=True)
jeff2=jj.loc[jeff['level_1']]
plt.figure(figsize=(15,5))
ax = sns.histplot(jeff2, x='time', hue='target', weights='rank',
multiple='stack', palette='icefire', shrink=0.6,bins=len(pd.unique(jeff2['time']))+5)
ax.set_ylabel('rank_noHT')
# ax.set_title(cc)
# Fix the legend so it's not on top of the bars.
# legend = ax.get_legend()
plt.legend([],[], frameon=False)
def time_order_net(control,case,thresh=10**-6,group='source',groups=6,rounder=1,math='mean'):
def preproc(data):
data.columns=data.columns.str.split('_').str[0]
data.columns=pd.qcut((data.columns).astype('int'), groups, labels=False)
noHTm=data.groupby(by=data.columns, axis=1).mean()
noHTm=noHTm.dropna(how='all')
noHTm.reset_index(inplace=True)
noHTv=data.groupby(by=data.columns, axis=1).var()
noHTv=noHTv.dropna(how='all')
noHTv.reset_index(inplace=True)
return noHTm,noHTv
noHTm,noHTv=preproc(control)
HTm,HTv=preproc(case)
if math=='mean':
BB=noHTm[noHTm[0]>thresh].dropna().groupby(group).mean()-HTm[HTm[0]>thresh].dropna().groupby(group).mean()
elif math=='median':
BB=noHTm[noHTm[0]>thresh].dropna().groupby(group).median()-HTm[HTm[0]>thresh].dropna().groupby(group).median()
BB=np.round(BB,rounder)
aa='(BB[0]>='
bb='(BB[0]<='
for i in np.arange(groups)[1:]:
cc='BB['+str(i)+'])&(BB['+str(i)+']>='
aa=aa+str(cc)
dd='BB['+str(i)+'])&(BB['+str(i)+']<='
bb=bb+str(dd)
grow=BB[eval(bb[:-9])]
die=BB[eval(aa[:-9])]
def proc_run(BBgrow,grow):
if len(BBgrow)>0:
BBgrow[groups]=BBgrow[0]-BBgrow[groups-1]
BBgrow=BBgrow[BBgrow[groups]!=0]
BBgrow.sort_values(by=groups,inplace=True)
del BBgrow[groups]
BBgrow.to_csv('data/gcn/comp_net/'+str(group)+'_'+str(thresh)+'_'+str(math)+'_'+str(groups)+'_'+grow+'.txt',sep='\t')
else:
BBgrow=0
return BBgrow
BBgrow=proc_run(grow,'grow')
BBdie=proc_run(die,'die')
return BBgrow,BBdie,noHTm,HTm
def build_gcn(i,net,cc,min_deg=5):
# relgene=pd.read_csv(path,sep='\t')
# # relgene=pd.read_csv('50_genefamilies-cpm.tsv')
# # relgene=pd.read_csv('hmp_subset_genefamilies-cpm.tsv',sep='\t',nrows=100)
# relgene['gene']=relgene['# Gene Family'].str.split('|').str[0]
# relgene=relgene[relgene['gene']!='UniRef90_unknown']
# relgene=relgene[relgene['gene']!='UNMAPPED']
# relgene.index=relgene['# Gene Family']
# del relgene['gene'], relgene['# Gene Family']
# # relgene=relgene/relgene.sum(axis=0)
# # relgene=relgene/relgene.sum(axis=0)
# relgene['gen']=relgene.index.str.split('|').str[1].str.split('.').str[0].tolist()
# relgene['spec']=relgene.index.str.split('.').str[1]#.str.split('.').str[0].tolist()
# relgene['spec'].replace('_',' ')
# relgene.index=relgene.index.str.split('|').str[0]
# relgene=relgene.dropna()
# cc=relgene.groupby(['# Gene Family','spec']).sum()
# cc=cc.reset_index()
# cc=cc.rename(columns={'# Gene Family':'gene'})
# ff=[]
# C=[]
# for i,net in enumerate(relgene.columns[1:-2]):
# pd.read_csv()
dd=cc[['spec','gene',net]]
dd=dd[dd[net]!=0]
ee=nx.from_pandas_edgelist(dd,source='spec',target='gene',edge_attr=net)
remove = [node for node,degree in dict(ee.degree()).items() if degree <min_deg]
ee.remove_nodes_from(remove)
# ff.append(ee)
B = nx.Graph()
B.add_nodes_from(dd['spec'], bipartite=0)
B.add_nodes_from(dd['gene'], bipartite=1)
B.add_edges_from(tuple(dd[['spec','gene']].itertuples(index=False, name=None)))
remove = [node for node,degree in dict(B.degree()).items() if degree <min_deg]
B.remove_nodes_from(remove)
# C.append(B)
return ee,B
# with open('data/gcn/NX_Emore_'+name+'.pkl', 'wb') as f:
# pickle.dump(ff, f)
# with open('data/gcn/BX_Emore_'+name+'.pkl', 'wb') as f:
# pickle.dump(C, f)
def buildSYNCSA(dd):
names=pd.unique(dd.columns.str.split('_').str[1]+'_'+dd.columns.str.split('_').str[2])[1:]
for i in names:
# ff.columns = ff.columns.str.strip('_x')
# ff.columns = ff.columns.str.strip('_y')
# i=i.split('_')[1]+'_'+i.split('_')[2]
ff=dd.loc[:,dd.columns.str.contains(i)]
ff[['source','target']]=dd[['source','target']]
ff=ff[ff['source'].str.contains('s__')]
ff=ff[ff['target'].str.contains('UniRef')]
ff.groupby('source').sum().transpose().to_csv('comm_'+i+'.csv')
ff.reset_index(inplace=True)
ff.set_index(['source', 'target'], inplace=True)
del ff['index']
ff.columns=(ff.columns.str.split('_').str[1]+'_'+ff.columns.str.split('_').str[2])
gg=ff.groupby(by=ff.columns, axis=1).sum()
traits=gg[[i]].reset_index().pivot('source','target',i).dropna(how='all',axis=1).replace(np.nan,0)
traits.to_csv('trait_'+i+'.csv')
def buildNestedNess():
C= | pd.DataFrame(columns=['N','Q','I','type']) | pandas.DataFrame |
import sys
import pandas as pd
def combine_express_output(fnL,
column='eff_counts',
names=None,
tg=None,
define_sample_name=None,
debug=False):
"""
Combine eXpress output files
Parameters:
-----------
fnL : list of strs of filenames
List of paths to results.xprs files.
column : string
Column name of eXpress output to combine.
names : list of strings
Names to use for columns of output files. Overrides define_sample_name
if provided.
tg : string
File with transcript-to-gene mapping. Transcripts should be in first
column and genes in second column.
define_sample_name : function that takes string as input
Function mapping filename to sample name (or basename). For instance,
you may have the basename in the path and use a regex to extract it.
The basenames will be used as the column names. If this is not provided,
the columns will be named as the input files.
debug : boolean
Passing True will trigger any debugging statements.
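    Example:
    --------
    A possible define_sample_name when each results.xprs sits in its own
    sample directory, e.g. ".../sample_A/results.xprs" (layout is illustrative):
        define_sample_name = lambda fn: fn.split('/')[-2]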
"""
if names is not None:
assert len(names) == len(fnL)
if define_sample_name is None:
define_sample_name = lambda x: x
transcriptL = []
for i,fn in enumerate(fnL):
if names is not None:
bn = names[i]
else:
bn = define_sample_name(fn)
tDF = pd.read_table(fn, index_col=1, header=0)
se = tDF[column]
se.name = bn
transcriptL.append(se)
transcriptDF = | pd.DataFrame(transcriptL) | pandas.DataFrame |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from collections import defaultdict
from itertools import combinations
from itertools import chain
import pickle
from pas_utils import *
from feature import *
if __name__=="__main__":
OUTPUT_DIR="./APA_ML/processed"
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
CONTROL_USAGE_FILE='./APA_ML/F1/control.f1.usage.txt'
DIFFERENTIAL_USAGE_FILE='./APA_ML/F1/differential.f1.usage.txt'
PARENTAL_SEQUENCE_TABLE_FILE=os.path.join(OUTPUT_DIR,'parental_sequence_table.h5')
control_usage_table=pd.read_table(CONTROL_USAGE_FILE)
differential_usage_table=pd.read_table(DIFFERENTIAL_USAGE_FILE)
parental_table=pd.read_hdf(PARENTAL_SEQUENCE_TABLE_FILE)
control_usage_table.rename(map_index,inplace=True)
differential_usage_table.rename(map_index,inplace=True)
control_sequence_table=pd.DataFrame({'coordinate':parental_table['coordinate'].loc[control_usage_table.index],
'bl_sequence':parental_table['bl_sequence'].loc[control_usage_table.index],
'sp_sequence':parental_table['sp_sequence'].loc[control_usage_table.index],
'bl_usage':control_usage_table['F1/BL6'],
'sp_usage':control_usage_table['F1/SPR'],
'differential':False})
control_sequence_table=control_sequence_table[['coordinate','bl_sequence','sp_sequence','bl_usage','sp_usage','differential']]
differential_sequence_table=pd.DataFrame({'coordinate':parental_table['coordinate'].loc[differential_usage_table.index],
'bl_sequence':parental_table['bl_sequence'].loc[differential_usage_table.index],
'sp_sequence':parental_table['sp_sequence'].loc[differential_usage_table.index],
'bl_usage':differential_usage_table['F1/BL6'],
'sp_usage':differential_usage_table['F1/SPR'],
'differential':True})
differential_sequence_table=differential_sequence_table[['coordinate','bl_sequence','sp_sequence','bl_usage','sp_usage','differential']]
sequence_table=pd.concat([control_sequence_table,differential_sequence_table])
gene_ids=list(sorted(set([pas_id.split(':')[0] for pas_id in sequence_table.index])))
sequence_table.sort_index(inplace=True)
print("Preparing Signals")
bl_signal=[]
sp_signal=[]
for i,gene in enumerate(gene_ids):
gene_indices=sequence_table.index.str.startswith(gene)
print("[%d/%d]"%(i+1,len(gene_ids)),end='\r')
gene_bl_usage=sequence_table.loc[gene_indices].sort_index()['bl_usage'].values
gene_sp_usage=sequence_table.loc[gene_indices].sort_index()['sp_usage'].values
gene_bl_signal=usage2signal(gene_bl_usage)
gene_sp_signal=usage2signal(gene_sp_usage)
bl_signal.append(gene_bl_signal)
sp_signal.append(gene_sp_signal)
bl_signal=np.concatenate(bl_signal,axis=0)
sp_signal=np.concatenate(sp_signal,axis=0)
print()
sorted_index=sequence_table.sort_index().index
bl_signal= | pd.Series(bl_signal,index=sorted_index) | pandas.Series |
import os
import sys
import time
import argparse
import pandas as pd
import numpy as np
from scipy import interp
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
plt.style.use('ggplot')
sns.set(color_codes=True)
sns.set(font_scale=4)
sns.set_style("whitegrid")
sns.set_context("paper")
sns.set(style='white', palette='muted', color_codes=True)
sns.despine(left=True)
fig_format = 'pdf'
if not os.path.exists('fig'):
os.system('mkdir fig')
##########
# figure 1
##########
title_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
filename_dataset = 'snap_data_w_annot_1000_seq_reg.txt'
dataset = pd.read_csv(filename_dataset, sep='\t')
d1 = dataset[dataset['label']==1]
d0 = dataset[dataset['label']==0]
# AFR, AMR, ASN, EUR
for i, ft in enumerate(['AFR', 'AMR', 'ASN', 'EUR']):
f = plt.figure()
x_range = np.linspace(0.0, 1.0, 11)
ft_ll1 = np.histogram(d1[ft], x_range, density=True)[0]
ft_ll0 = np.histogram(d0[ft], x_range, density=True)[0]
ft_llr = np.log(ft_ll1 / ft_ll0)
sns.barplot(x=[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9], y=ft_llr, palette='vlag')
plt.xlabel('allele frequency', fontsize=18)
plt.ylabel('LLR(+/-)', fontsize=18)
plt.tick_params(axis='both', labelsize=18)
plt.title('({}) Binned log-likelihood-ratio: {}'.format(title_list[i], ft), fontsize=18)
plt.tight_layout()
plt.savefig('./fig/fig1_{}.{}'.format(ft, fig_format))
print('[INFO]fig1_{}.{} saved to ./fig'.format(ft, fig_format))
# reg_score_int
f = plt.figure()
ft = 'reg_score_int'
ft_ll1 = np.array([d1[d1[ft] == x].shape[0] for x in np.arange(1.0, 17.0, 1.0)])
ft_ll0 = np.array([d0[d0[ft] == x].shape[0] for x in np.arange(1.0, 17.0, 1.0)])
ft_llr = np.log(ft_ll1 / ft_ll0)
sns.barplot(x=np.arange(1, 17, 1), y=ft_llr, palette='vlag')
plt.xlabel('RegulomeDB score (encoded)', fontsize=18)
plt.ylabel('LLR(+/-)', fontsize=18)
plt.tick_params(axis='both', labelsize=18)
plt.title('(e) Log-likelihood-ratio: {}'.format(ft), fontsize=18)
plt.tight_layout()
plt.savefig('./fig/fig1_{}.{}'.format(ft, fig_format))
print('[INFO]fig1_{}.{} saved to ./fig'.format(ft, fig_format))
# GENCODE_direction, RefSeq_direction
for i, ft in enumerate(['GENCODE_direction', 'RefSeq_direction']):
f = plt.figure()
x_range = [-1, 1, 4, 6]
ft_ll1 = np.histogram(d1[ft], x_range, density=True)[0]
ft_ll0 = np.histogram(d0[ft], x_range, density=True)[0]
ft_llr = np.log(ft_ll1 / ft_ll0)
sns.barplot(x=[0, 3, 5], y=ft_llr, palette='vlag', label=ft)
plt.xlabel(ft, fontsize=18)
plt.ylabel('LLR(+/-)', fontsize=18)
plt.tick_params(axis='both', labelsize=18)
plt.title('({}) Log-likelihood-ratio: {}'.format(title_list[i+5], ft), fontsize=18)
plt.tight_layout()
plt.savefig('./fig/fig1_{}.{}'.format(ft, fig_format))
print('[INFO]fig1_{}.{} saved to ./fig'.format(ft, fig_format))
# GERP_cons, SiPhy_cons
for i, ft in enumerate(['GERP_cons', 'SiPhy_cons']):
f = plt.figure()
ft_ll1 = np.array([d1[d1[ft]==0.0].shape[0], d1[d1[ft]==1.0].shape[0]])
ft_ll0 = np.array([d0[d0[ft]==0.0].shape[0], d0[d0[ft]==1.0].shape[0]])
ft_llr = np.log(ft_ll1 / ft_ll0)
sns.barplot(x=[0.0, 1.0], y=ft_llr, palette='vlag')
plt.xlabel(ft, fontsize=18)
plt.ylabel('LLR(+/-)', fontsize=18)
plt.tick_params(axis='both', labelsize=18)
plt.title('({}) Log-likelihood-ratio: {}'.format(title_list[i+7], ft), fontsize=18)
plt.tight_layout()
plt.savefig('./fig/fig1_{}.{}'.format(ft, fig_format))
print('[INFO]fig1_{}.{} saved to ./fig'.format(ft, fig_format))
# plot annotation distribution
f = plt.figure()
table0 = d0.pivot_table(index='chr', columns='annotation', values='label', aggfunc='count')
sns.heatmap(table0, annot=True, cmap='YlGnBu', fmt='g', cbar_kws={'label': 'count'})
f.savefig('./fig/fig1_annot0.{}'.format(fig_format))
print('[INFO] fig1_annot0.{} saved to ./fig'.format(fig_format))
f = plt.figure()
table1 = d1.pivot_table(index='chr', columns='annotation', values='label', aggfunc=np.sum)
sns.heatmap(table1, annot=True, cmap='YlGnBu', fmt='g', cbar_kws={'label': 'count'})
f.savefig('./fig/fig1_annot1.{}'.format(fig_format))
print('[INFO] fig1_annot1.{} saved to ./fig'.format(fig_format))
##########
# figure 2
##########
method_list = ['regulomedb', 'DeFine0', 'DeFine', 'cnn_1s', 'cnn_2s', 'resnet_2s2a', 'resnet_2s2a_metadata']
method_list_xticks = ['RDB', 'DF0', 'DF', 'CNN1s', 'CNN2s', 'Res', 'ResM']
random_seed_list = [1337, 1338, 1339, 1340, 1341]
perf_list = []
for method in method_list:
for random_seed in random_seed_list:
filename = 'out_test_{}_1000_fold_{}.txt'.format(method, random_seed)
perf = | pd.read_csv(filename, sep='\t', nrows=1) | pandas.read_csv |
# all domains
# merge/split common boundary x = max(3bin,0.1 TAD Length)
# region < agrs.remote
# less complex
# zoom
# to filter the strength first
import pandas as pd
import numpy as np
#from tqdm import tqdm
import argparse
import os
# import warnings
# warnings.filterwarnings('ignore')
# the arguments from command line
parser = argparse.ArgumentParser(description='python scriptname <-d> <-t> [options]')
parser.add_argument('-d','--diff', type=str, default = None,help="path/ the text of diffdoamin's outcome")
parser.add_argument('-t','--tad',type=str, default=None,help='path/ the other tadlist')
parser.add_argument('-o','--out',type=str,default=None,help='the output path')
parser.add_argument('-l','--limit',type=int,default=40000,help='the range(length of bases) to judge the common boundary')
parser.add_argument('-k','--kpercent',type=int,default=10,help='the common boundareis are within max(l*bin,k% TAD length)')
parser.add_argument('-r','--remote',type=int,default=1000000,help='the limitation of the biggest region')
parser.add_argument('-s1','--skip1',type=int,default=25,help='to skip the first s1 rows in "--diff" file; if you input 25, the first 25 rows [0,24] will be skipped.')
parser.add_argument('-s2','--skip2',type=int,default=None,help='to skip the first s2 rows in the other tadlist file')
parser.add_argument('--sep1',type=str,default='\t',help="the seperator of the diffdoamin's outcome (like ',')")
parser.add_argument('--sep2',type=str,default='\t',help="the seperator of the other tadlist")
args = parser.parse_args()
# load the files
data = pd.read_table(args.diff,skiprows=args.skip1,sep=args.sep1)
tad = pd.read_table(args.tad,skiprows=args.skip2,sep=args.sep2,header=None)
#preprocessing
cols = data.columns
data.rename(columns={cols[0]:'chr',cols[1]:'start',cols[2]:'end'},inplace=True)
data_diff = data.loc[data['adj_pvalue']<0.05,['chr','start','end']]
data_diff['significant'] = 1
data_diff.reset_index(inplace=True,drop=True)
tad = tad.iloc[:,0:3]
tad.columns = ['chr','start','end']
tad.sort_values(by=['chr','start','end'],inplace=True)
tad.reset_index(inplace=True,drop = True)
tad['range'] = list(map(lambda a,b:(a,b) , tad.start,tad.end))
# preparation
chrs = list(map(str,list(range(1,23))))+['X']
colnames = ['chr','start','end','range','type','origin','subtype','significant']
tad_ = data_main = loss = single = merge = split = multi = pd.DataFrame(columns=colnames)
tad_ = pd.concat([tad_,tad],axis=0)
tad = tad_
data_main = pd.concat([data_main,data.iloc[:,0:3]],axis=0)
data_main['significant'] = 0
data_main = pd.concat([data_main,data_diff],axis=0)
data_main.drop_duplicates(subset=['chr','start','end'],keep='last',inplace=True)
data_main['range'] = list(map(lambda a,b:(a,b) , data_main.start,data_main.end))
data_main['origin'] = 'diffdomain'
data_main.sort_values(by=['chr','start','end'],inplace=True)
data_main.reset_index(inplace=True,drop=True)
def identical(boundary1,boundary2):
# to judge the "common boundary"
    if int(boundary1) <= int(boundary2)+args.limit and int(boundary1) >= int(boundary2)-args.limit:
return True
else:
return False
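# With the default args.limit of 40000, identical(1000000, 1030000) is True
# while identical(1000000, 1050000) is False.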
def cross(main,vise):
# main is the protagnist tad
# to find the tads related to main in vise
note=pd.DataFrame(columns=colnames)
for i in range(vise.shape[0]):
        if (int(main['end'])-args.limit > int(vise.loc[i,'start']) and int(main['start'])+args.limit < int(vise.loc[i,'end']) ):
note=pd.concat([note,pd.DataFrame(vise.loc[i,:].values.reshape(1,-1),columns=colnames)],axis=0)
return note
def n_of_region(outcome):
# to count the number of regions in the dataframe
n_region = 0
if len(outcome) != 0 :
n_region = 1
for i in range(2,len(outcome)):
if outcome['origin'].values[i]=='diffdomain' and outcome['origin'].values[i-1]=='the other tadlist':
n_region = n_region+1
return n_region
def n_diffdomain(outcome):
n_diff = outcome.loc[outcome['origin']=='diffdomain',:].shape[0]
return n_diff
# the 4th virsion+ bin
# try:
for c in chrs:
temp = data_main.loc[data_main['chr']==c,:].copy()
tadlist = tad.loc[tad['chr']==c,:].copy()
tadlist['origin'] = 'the other tadlist'
temp.reset_index(inplace=True,drop=True)
tadlist.reset_index(inplace=True,drop=True)
temp = temp[colnames]
tadlist = tadlist[colnames]
temp['start'] = temp['start'].astype(int)
temp['end'] = temp['end'].astype(int)
tadlist['start'] = tadlist['start'].astype(int)
tadlist['end'] = tadlist['end'].astype(int)
# filter the strength-change diffdomains and other non-significantly differentail tads with common boudaries in vise tadlist
tad_index = []
cross_index = []
for i in range(temp.shape[0]):
# the i th TADs in the result of DiffDomain
# to filter the TADs with common boundaries in different conditions
# initialize the variables
note_tad = note_cross = | pd.DataFrame(columns=colnames) | pandas.DataFrame |
from collections import namedtuple
import numpy as np
import pandas as pd
import random
from scipy.special import gammaln
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.linalg import expm
from tqdm import tqdm
from matplotlib import pyplot as plt
from tqdm import tqdm
from eda import us_data
from mass_pop_data import ma_county_pops
from tx_pop_data import tx_county_pops
from nyt_data import county_data
# T = len(us_data['confirmed'])
np.set_printoptions(precision=3)
log = np.log
exp = np.exp
#N = US_POP = 327 * 10**6
underreporting_factors = np.linspace(1, 10, 1000)
doubling_times = np.linspace(2, 7, 1000)
VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
SEIR_VAR_NAMES = ['s', 'e', 'i', 'r', 'd']
SEIR_PARAM_NAMES = ['beta', 'sigma', 'gamma', 'mu', 'I0']
PARAM_NAMES = ['beta', 'delta', 'gamma_u', 'gamma_c', 'mu']
# Param assumptions
incubation_period = 14
recovery_period = 21
fatality_rate = 0.02
R0 = 2.2
iota = 1 / incubation_period
rho = 1 / recovery_period
delta = rho * (fatality_rate) / (1 - fatality_rate)
epsilon = R0 * (rho + delta)
def log_fac(x):
return gammaln(x + 1)
def sir_deriv(arr, params):
assert(np.isclose(np.sum(arr), 1))
s, i, c, ru, rc, d = arr
beta, delta, gamma_u, gamma_c, mu = params
ds = - beta * s * i
di = beta * s * i - gamma_u * i - delta * i
dc = delta * i - (mu + gamma_c) * c
dru = gamma_u * i
drc = gamma_c * c
dd = mu * c
darr = np.array([ds, di, dc, dru, drc, dd])
assert(np.isclose(np.sum(darr), 0))
return darr
def seir_deriv(x, params):
assert(np.isclose(np.sum(x), 1))
s, e, i, r, d = x
beta, sigma, gamma, mu = params
ds = -beta * s * i
de = beta * s * i - sigma * e
di = sigma * e - (gamma + mu) * i
dr = gamma * i
dd = mu * i
dx = np.array([ds, de, di, dr, dd])
assert(np.isclose(np.sum(dx), 0))
return dx
def solve_sir(x0, params, end_time):
f = lambda t, x: sir_deriv(x, params)
assert(np.isclose(sum(x0), 1))
t0 = 0
tf = end_time
t_span = (t0, tf)
sol = solve_ivp(f, t_span, x0, max_step=1, t_eval=range(tf))
return sol
def solve_seir(x0, params, end_time):
f = lambda t, x: seir_deriv(x, params)
assert(np.isclose(sum(x0), 1))
t0 = 0
tf = end_time
t_span = (t0, tf)
sol = solve_ivp(f, t_span, x0, max_step=1, t_eval=range(tf))
return sol
def init_approximation(params):
beta, delta, gamma_u, gamma_c, mu = params
ALPHA = beta - (delta + gamma_u)
ETA = gamma_c + mu
coeff = delta * I0/(ALPHA + ETA)
Kc = -coeff # c should be zero at t=0
def c(t):
return coeff * exp(ALPHA * t) + Kc*exp(-ETA*t)
def z(t):
return coeff / ALPHA * exp(ALPHA * t) - Kc / ETA * exp(-ETA*t)
Kz = -mu * z(0)
def d(t):
return mu * z(t) + Kz
Kru = -gamma_c * z(0)
def rc(t):
return gamma_c * z(t) + Kru
return c, rc, d
def bound(x, N):
return np.clip(x, 1/N, 1 - 1/N)
def init_approximation_sse(log_params, data):
M = 10
N = data['pop']
T = len(data['confirmed'])
params = exp(log_params)
ts = np.arange(T)
_c, _rc, _d = init_approximation(params)
    c = bound(_c(ts), N)[:-2] + 1/N
    d = bound(_d(ts), N)[:-2] + 1/N
    rc = bound(_rc(ts), N)[:-2] + 1/N
    trash = bound(1 - (c + d + rc), N)
obs_c = us_data['confirmed'][:-2]
obs_d = us_data['deaths'][:-2]
obs_rc = us_data['recovered']
obs_trash = N - (obs_c + obs_d + obs_rc)
prefactor = log_fac(N) - (log_fac(obs_c) + log_fac(obs_d) + log_fac(obs_rc) + log_fac(obs_trash))
#return sum(((log(c(ts) + 1/N) - log(obs_c + 1/N)))**2) + sum(((log(d(ts) + 1/N) - log(obs_d + 1/N)))**2) + sum((log(rc(ts)[:-2] + 1/N) - log(obs_rc + 1/N))**2)
return sum(prefactor + obs_c * log(c) + obs_d * log(d) + obs_rc * log(rc) + obs_trash * log(trash))
def q(x, sigma=0.01):
"""for use with log params"""
return x + np.random.normal(0, sigma, size=len(x))
def mh(lf, q, x, iterations=10000, modulus=100):
traj = []
ll = lf(x)
accepts = 0
for iteration in range(iterations):
xp = q(x)
llp = lf(xp)
if log(random.random()) < llp - ll:
x = xp
ll = llp
accepts += 1
if iteration % modulus == 0:
traj.append((x, ll))
print(
"{}/{} log_params: {} log-likelihood: {:1.3f} acceptances: {} acceptance ratio: {:1.3f}".format(
iteration, iterations, x, ll, accepts, accepts / (iteration + 1)
)
)
return traj
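# A minimal usage sketch for the Metropolis-Hastings helper above. The target
# below is a hypothetical toy log-density (a 2-d standard normal), not one of
# the epidemic models; it only illustrates the calling convention of `mh`/`q`.
def _demo_mh_usage(iterations=1000):
    toy_lf = lambda x: -0.5 * np.sum(x ** 2)  # log-density of a 2-d N(0, I)
    x0 = np.zeros(2)                          # arbitrary starting point
    traj = mh(toy_lf, lambda x: q(x, sigma=0.5), x0,
              iterations=iterations, modulus=100)
    # traj holds (sample, log-likelihood) pairs recorded every `modulus` steps
    return np.array([x for (x, _) in traj])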
def fit_init_approximation(tol=10**-14):
x0 = np.random.normal(0, 1, size=len(PARAM_NAMES))
# x0 = np.array([ 13.26726095, -7.21161112, 13.26726049, -6.55617211,
# -52.65910809])
return minimize(init_approximation_sse, x0, method='powell', options={'maxiter': 100000, 'xtol':tol, 'disp':True})
def check_init_approxiation_fit(tol):
sol = fit_init_approximation(tol)
def plot_log_params(log_params, data, plot_data=True, plot_legend=True, show=True):
params = exp(log_params)
N = data['pop']
T = len(data['confirmed'])
c, rc, d = init_approximation(params)
obs_c = data['confirmed'] / N
obs_d = data['deaths'] / N
obs_rc = data['recovered'] / N
ts = np.arange(T)
if plot_data:
plt.plot(obs_c, linestyle=' ', marker='o', label='obs c')
plt.plot(obs_d, linestyle=' ', marker='o', label='obs d')
plt.plot(obs_rc, linestyle=' ', marker='o', label='obs rc')
plt.plot(c(ts), label='est c', color='b', linestyle='--')
plt.plot(d(ts), label='est d', color='orange', linestyle='--')
plt.plot(rc(ts), label='est rc', color='g', linestyle='--')
if plot_legend:
plt.legend()
if show:
plt.show()
def test_init_approximation(data):
# VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
N = data['pop']
I0 = 1/N
ic = [1-I0, I0, 0, 0, 0, 0]
params = np.array([ 0.82, 0.22, 0.34, 2.30, 10.28]) * 3
    sol = solve_sir(ic, params, len(data['confirmed']))
def estimate_init_conds():
confirmed_cases = 13
underreporting_factor = 10
initial_cases = confirmed_cases * underreporting_factor
susceptible_cases = boston_pop - initial_cases
infected_cases = initial_cases / 3
exposed_cases = initial_cases - infected_cases
s = susceptible_cases / boston_pop
e = exposed_cases / boston_pop
i = infected_cases / boston_pop
d = 0
r = 0
def plot_sir_sol(sol):
ts = sol.t
c = sol.y[VAR_NAMES.index('c'), :]
i = sol.y[VAR_NAMES.index('i'), :]
y = c + i
y0, yf = y[0], y[10]
t0, tf = ts[0], ts[10]
doublings = np.log2(yf / y0)
doubling_time = (tf - t0) / doublings
print("doubling time:", doubling_time)
    for i, var_name in enumerate(VAR_NAMES):
plt.plot(sol.y[i, :], label=var_name)
plt.legend()
plt.show()
def log_likelihood(sol, data):
obs_c = data['confirmed']
obs_rc = data['recovered']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = sol.y[VAR_NAMES.index('c'), :]
#y_rc = sol.y[VAR_NAMES.index('rc'), :]
y_d = sol.y[VAR_NAMES.index('d'), :]
y_trash = 1 - (y_c + y_d)
log_prob = 0
for t in range(T):
#print(t)
C, D = obs_c[t], obs_d[t]
TRASH = N - (C + D)
c, d, trash = y_c[t], y_d[t], y_trash[t]
prefactor = log_fac(N) - (log_fac(C) + log_fac(D) + log_fac(TRASH))
#print(c, rc, d)
log_prob_t = prefactor + C * log(c) + D * log(d) + TRASH * log(trash)
#print(prefactor, log_prob_t)
log_prob += log_prob_t
return log_prob
def log_likelihood2(sol, data):
obs_c = data['confirmed']
obs_rc = data['recovered']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = sol.y[VAR_NAMES.index('c'), :]
y_rc = sol.y[VAR_NAMES.index('rc'), :]
y_d = sol.y[VAR_NAMES.index('d'), :]
y_trash = 1 - (y_c + y_rc + y_d)
log_prob = 0
for t in range(T):
#print(t)
C, RC, D = obs_c[t], obs_rc[t], obs_d[t]
TRASH = N - (C + RC + D)
c, rc, d, trash = y_c[t], y_rc[t], y_d[t], y_trash[t]
#print(c, rc, d)
log_prob_t = -((C - c*N)**2 + (RC - rc*N)**2 + (D - (d*N))**2 + (TRASH - trash*N)**2)
#print(prefactor, log_prob_t)
log_prob += log_prob_t
return log_prob
def seir_log_likelihood(sol, data, only_deaths=True):
obs_c = data['confirmed']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = bound(sol.y[SEIR_VAR_NAMES.index('i'), :], N)
y_d = bound(sol.y[SEIR_VAR_NAMES.index('d'), :], N)
if only_deaths:
y_trash = 1 - (y_d)
else:
y_trash = 1 - (y_c + y_d)
log_prob = 0
for t in range(T):
#print(t)
# if obs_c[t] < 100:
# continue
if only_deaths:
D = obs_d[t]
TRASH = N - D
d, trash = y_d[t], y_trash[t]
log_prob += multinomial_ll([d, trash], [D, TRASH])
else:
C, D = obs_c[t], obs_d[t]
TRASH = N - (C + D)
c, d, trash = y_c[t], y_d[t], y_trash[t]
log_prob += multinomial_ll([c, d, trash], [C, D, TRASH])
# log_prob += sse_ll([c, d, trash], [C, D, TRASH])
return log_prob
def multinomial_ll(ps, obs):
N = np.sum(obs)
prefactor = log_fac(N) - sum(log_fac(n) for n in obs)
return prefactor + sum(o * log(p) for (p, o) in zip(ps, obs))
def sse_ll(ps, obs):
N = sum(obs)
return -sum((p * N - o)**2 for (p, o) in zip(ps, obs))
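# Hypothetical sanity check (not part of the original script) for the two
# per-day scores above: with model probabilities that exactly match the
# observed counts, `sse_ll` is 0 and `multinomial_ll` equals the log of the
# multinomial pmf evaluated at its mode.
def _demo_count_likelihoods():
    ps = [0.01, 0.99]      # model probabilities for two bins
    obs = [10, 990]        # observed counts, N = 1000
    return multinomial_ll(ps, obs), sse_ll(ps, obs)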
def random_hyp():
ic = np.array([0.99] + [random.random() * 0.01 for _ in range(len(VAR_NAMES) - 1)])
ic = ic / sum(ic)
log_thetas = np.random.normal(0, 1, size=len(PARAM_NAMES))
thetas = exp(log_thetas)
thetas[5:] /= 10
return ic, thetas
def mutate_hyp(hyp):
ic, thetas = hyp
log_ic = log(ic)
new_log_ic = log_ic + np.random.normal(0, 0.01, size=len(ic))
new_ic = exp(new_log_ic)
new_ic /= sum(new_ic)
log_thetas = log(thetas)
new_log_thetas = log_thetas + np.random.normal(0, 0.01, size=len(thetas))
new_thetas = exp(new_log_thetas)
return new_ic, new_thetas
def ll_from_hyp(hyp, data):
ic, thetas = hyp
T = len(data['confirmed'])
sol = solve_sir(ic, thetas, T)
return log_likelihood(sol, data)
def fit_model(data, generations=10000):
ll = None
traj = []
acceptances = 0
while ll is None:
hyp = random_hyp()
print(hyp)
prop_ll = ll_from_hyp(hyp, data)
if not np.isnan(prop_ll):
ll = prop_ll
for t in range(generations):
hyp_p = mutate_hyp(hyp)
ll_p = ll_from_hyp(hyp_p, data)
if np.log(random.random()) < ll_p - ll:
acceptances += 1
hyp = hyp_p
ll = ll_p
if t % 100 == 0:
traj.append((hyp, ll))
print(t, ll, "ar:", acceptances / (t + 1))
print(hyp)
return traj
def ps_from_lls(lls):
print("min, max:", min(lls), max(lls))
    a = max(lls)
    ps = [exp(ll - a) for ll in lls]
return ps
def check_hyp(hyp, data):
N = data['pop']
T = len(data['confirmed'])
x0, params = hyp
sol = solve_sir(x0, params, T)
for name, ts in zip(VAR_NAMES, sol.y):
plt.plot(ts, label=name)
plt.plot(data['confirmed'] / N, label='obs confirmed', marker='o', linestyle=' ')
plt.plot(data['recovered'] / N, label='obs recovered', marker='o', linestyle=' ')
plt.plot(data['deaths'] / N, label='obs deaths', marker='o', linestyle=' ')
plt.legend()
def plot_lls(traj):
lls = [ll for (x, ll) in traj]
plt.subplot(2, 1, 1)
plt.plot(lls)
plt.xlabel("Iterations x 100", size='x-large')
plt.ylabel("Log-likelihood", size='x-large')
plt.subplot(2, 1, 2)
plt.plot(lls)
plt.ylim(-760, -730)
plt.xlabel("Iterations x 100", size='x-large')
plt.ylabel("Log-likelihood", size='x-large')
plt.tight_layout()
plt.savefig("ll-plot.png", dpi=300)
def plot_param_results(traj, data):
"""Use with SIR"""
N = data['pop']
T = len(data['confirmed'])
log_params, ll = traj[-1]
params = exp(log_params)
# VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
    c, rc, d = init_approximation(params)
    sir_x0 = np.array([1 - 1/N, 1/N, 0, 0, 0, 0])
    sir_sol = solve_sir(sir_x0, params, T)
sir_c, sir_rc, sir_d = sir_sol.y[2], sir_sol.y[4], sir_sol.y[5]
obs_c = data['confirmed'] / N
obs_d = data['deaths'] / N
obs_rc = data['recovered'] / N
ts = np.arange(T)
plt.subplot(3, 1, 1)
plt.plot(obs_c, linestyle=' ', marker='o', label='C (observed)')
plt.plot(sir_c, color='blue', label='C (SIR model)')
plt.plot(c(ts), color='orange', linestyle='--', label='C (init approx)')
plt.legend()
plt.subplot(3, 1, 2)
plt.plot(obs_rc, linestyle=' ', marker='o', label='Rc (observed)')
plt.plot(sir_rc, color='blue', label='Rc (SIR model)')
plt.plot(rc(ts), color='orange', linestyle='--', label='Rc (init approx)')
plt.ylabel("Population Fraction", size='x-large')
plt.legend()
plt.subplot(3, 1, 3)
plt.plot(obs_d, linestyle=' ', marker='o', label='D (observed)')
plt.plot(sir_d, color='blue', label='D (SIR model)')
plt.plot(d(ts), color='orange', linestyle='--', label='D (init approx)')
plt.legend()
plt.xlabel("Days since 1/22/20", size='x-large')
plt.tight_layout()
plt.savefig("fit-results.png", dpi=300)
plt.close()
def log_param_scatterplot(log_param_traj, param_names=["beta", "sigma", "gamma", "mu", "I0"]):
# param_traj = [exp(lp) for lp in log_param_traj]
K = len(log_param_traj[0])
log_param_vecs = list(zip(*log_param_traj))
for i, i_param in enumerate(param_names):
for j, j_param in enumerate(param_names):
plt_idx = j * K + i + 1
print(i_param, j_param)
plt.subplot(K, K, plt_idx)
if plt_idx % K == 1:
plt.ylabel(j_param)
if j_param == param_names[-1]:
plt.xlabel(i_param)
print("x label:", i_param)
#plt.title(i_param + " " + j_param)
if i == j:
plt.hist(log_param_vecs[i])
else:
plt.scatter(log_param_vecs[i], log_param_vecs[j], s=5)
# plt.tight_layout()
# plt.savefig("param-pairplots.png", dpi=300)
    # plt.close()
def seir_experiment(data, log_params=None, iterations=10_000, sigma=0.01, only_deaths=True):
# S, E, I, R, D
T = len(data['confirmed'])
if log_params is None:
log_params = np.array([-0.19780107, -2.65762238, -3.21675428, -6.12722099, -19.6])
log_params = np.random.normal(-2, 1, size=len(log_params))
N = data['pop']
I0 = 1/N
log_params[-1] = log(I0) # seed I0 as 1 / US_POP
def lf(log_params):
params = exp(log_params)
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
sol = solve_seir(init_condition, params, T)
return seir_log_likelihood(sol, data, only_deaths=only_deaths)
traj = mh(lf, lambda x:q(x, sigma=sigma), log_params, modulus=10, iterations=iterations)
return traj
# log_params1 = traj1[-1][0]
# traj01 = mh(lf, lambda x:q(x, sigma=0.01), log_params1, modulus=10, iterations=1000)
# log_params01 = traj01[-1][0]
# traj001 = mh(lf, lambda x:q(x, sigma=0.01), log_params01, modulus=10, iterations=1000)
# log_params001 = traj001[-1][0]
# traj0001 = mh(lf, lambda x:q(x, sigma=0.001), log_params001, modulus=10, iterations=1000)
    # return traj1 + traj01 + traj001 + traj0001
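# Hypothetical end-to-end sketch of `seir_experiment` on synthetic counts.
# The data dict layout ('confirmed', 'deaths', 'pop') mirrors the real inputs,
# but the numbers below are made up for illustration only.
def _demo_seir_experiment(iterations=200):
    rng = np.random.RandomState(0)
    toy_data = {
        'confirmed': np.cumsum(rng.poisson(5, size=60)),
        'deaths': np.cumsum(rng.poisson(1, size=60)),
        'pop': 10**6,
    }
    traj = seir_experiment(toy_data, iterations=iterations, sigma=0.05)
    best_log_params, best_ll = traj[-1]  # last recorded state of the chain
    return best_log_params, best_ll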
def plot_seir_param_results(traj, data, fname=None):
log_params, ll = traj[-1]
T = len(data['confirmed'])
params = exp(log_params)
# SEIRD
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
seir_sol = solve_seir(init_condition, params, T)
seir_c, seir_r, seir_d = seir_sol.y[2], seir_sol.y[3], seir_sol.y[4]
N = data['pop']
T = len(data['confirmed'])
obs_c = data['confirmed']
obs_d = data['deaths']
#obs_rc = data['recovered'] / N
ts = np.arange(T)
approx_f = seir_approximation(init_condition, params)
approx_c = np.array([approx_f(t)[SEIR_VAR_NAMES.index('i')] for t in ts])
#approx_r = [approx_f(t)[SEIR_VAR_NAMES.index('r')] for t in ts]
approx_d = np.array([approx_f(t)[SEIR_VAR_NAMES.index('d')] for t in ts])
plt.subplot(2, 1, 1)
plt.plot(obs_c, linestyle=' ', marker='o', label='C (observed)')
plt.plot(seir_c * N, color='blue', label='C (SEIR model)')
plt.plot(approx_c * N, color='orange', label='C (approx)', linestyle='--')
# for log_params, ll in traj[::10]:
# params = exp(log_params)
# params, I0 = params[:-1], params[-1]
# init_condition = np.array([1 -I0, 0, I0, 0, 0])
# seir_sol = solve_seir(init_condition, params)
# seir_c, seir_r, seir_d = seir_sol.y[2], seir_sol.y[3], seir_sol.y[4]
# plt.plot(seir_c, color='blue', alpha=0.01)
plt.legend()
# plt.subplot(3, 1, 2)
# plt.plot(obs_rc, linestyle=' ', marker='o', label='Rc (observed)')
# plt.plot(seir_r, color='blue', label='Rc (SEIR model)')
# plt.plot(approx_r, color='orange', label='Rc (approx)', linestyle='--')
# plt.ylabel("Population Fraction", size='x-large')
# plt.legend()
plt.subplot(2, 1, 2)
plt.plot(obs_d, linestyle=' ', marker='o', label='D (observed)')
plt.plot(seir_d * N, color='blue', label='D (SEIR model)')
plt.plot(approx_d * N, color='orange', label='D (approx)', linestyle='--')
plt.legend()
plt.xlabel("Days since 1/22/20", size='x-large')
if fname:
plt.tight_layout()
        plt.savefig(fname, dpi=300)
plt.close()
else:
plt.show()
def plot_seir_sol(sol):
start = 'Jan 22, 2020'
today = pd.to_datetime('now')
date_range = pd.date_range(start=start, end=today)
today_t = len(date_range)
for var_name, data in zip(SEIR_VAR_NAMES, sol.y):
plt.plot(data, label=var_name)
plt.axvline(today_t, linestyle='--', label='today')
plt.legend()
plt.xlabel("Days since 1/22/2020")
plt.show()
def plot_seir_sols_from_traj(traj, data):
N = data['pop']
colors = 'brygc'
for i, (log_params, ll) in tqdm(enumerate(traj)):
params = exp(log_params)
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
sol = solve_seir(init_condition, params, 365*2)
for var_name, time_series, color in zip(SEIR_VAR_NAMES, sol.y, colors):
plt.plot(
time_series * N,
label=(var_name if i == 0 else None),
color=color,
alpha=0.5
)
plt.plot(data['confirmed'], marker='o', linestyle=' ', label='obs C')
plt.plot(data['deaths'], marker='o', linestyle=' ', label='obs D')
start = 'Jan 22, 2020'
today = pd.to_datetime('now')
date_range = | pd.date_range(start=start, end=today) | pandas.date_range |
# -*- coding: utf8 -*-
# My imports
from __future__ import division
import numpy as np
import os
import pandas as pd
from astropy.io import fits
def save_synth_spec(x, y, initial=None, **options):
'''Save synthetic spectrum of all intervals
Input
----
x : ndarray
Wavelength
y : ndarray
Flux
initial : list
Set of parameters to name the new file, else it is named 'synthetic.spec'.
Output
-----
    fname : fits file with the synthetic spectrum
'''
# Create header
header = fits.Header()
header['CRVAL1'] = x[0]
header['CDELT1'] = x[1] - x[0]
if initial:
fname = (
str(initial[0])
+ '_'
+ str(initial[1])
+ '_'
+ str(initial[2])
+ '_'
+ str(initial[3])
+ '_'
+ str(initial[4])
+ '_'
+ str(initial[5])
+ '_'
+ str(options['resolution'])
+ '.spec'
)
else:
fname = 'synthetic.spec'
tbhdu = fits.BinTableHDU.from_columns(
[
fits.Column(name='wavelength', format='D', array=x),
fits.Column(name='flux', format='D', array=y),
],
header=header,
)
tbhdu.writeto('results/%s' % fname, overwrite=True)
print('Synthetic spectrum saved: results/%s' % fname)
return
def broadening(x, y, vsini, vmac, resolution=None, epsilon=0.60):
'''This function broadens the given data using velocity kernels,
e.g. instrumental profile, vsini and vmac.
Based on http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/broadening.html
Input
----
x : ndarray
wavelength
y : ndarray
flux
resolution : float
Instrumental resolution (lambda /delta lambda)
vsini : float
vsini in km/s
vmac : float
vmac in km/s
epsilon : limb-darkening parameter
Output
-----
y_broad : ndarray
Broadened flux
x : ndarray
Same wavelength
'''
from PyAstronomy import pyasl
from scipy.signal import fftconvolve
from scipy.integrate import quad
def instrumental_profile(x, y, resolution):
'''
Inputs
-----
x, y : The abscissa and ordinate of the data.
sigma : The width (i.e., standard deviation) of the Gaussian profile
used in the convolution.
edgeHandling : None, "firstlast". Determines the way edges will be
handled. If None, nothing will be done about it. If set to "firstlast",
the spectrum will be extended by using the first and last value at the
start or end. Note that this is not necessarily appropriate.
The default is None.
        maxsig : The extent of the broadening kernel in terms of standard
deviations. By default, the Gaussian broadening kernel will be extended
over the entire given spectrum, which can cause slow evaluation in the
case of large spectra. A reasonable choice could, e.g., be five.
Output
-----
y_inst : convolved flux
'''
        # Deal with zero or None values separately
if (resolution is None) or (resolution == 0):
y_inst = y
else:
y_inst = pyasl.instrBroadGaussFast(
x, y, resolution, edgeHandling="firstlast", fullout=False, maxsig=None
)
return y_inst
def vsini_broadening(x, y, epsilon, vsini):
'''
Apply rotational broadening to a spectrum assuming a linear limb darkening
        law. The adopted limb-darkening law is the linear one, parameterized by the
        linear limb-darkening parameter: epsilon = 0.6.
The effect of rotational broadening on the spectrum is
wavelength dependent, because the Doppler shift depends
on wavelength. This function neglects this dependence, which
is weak if the wavelength range is not too large.
Code from: http://www.phoebe-project.org/2.0/
.. note:: numpy.convolve is used to carry out the convolution
and "mode = same" is used. Therefore, the output
will be of the same size as the input, but it
will show edge effects.
Input
-----
wvl : The wavelength
flux : The flux
epsilon : Linear limb-darkening coefficient (0-1).
vsini : Projected rotational velocity in km/s.
effWvl : The wavelength at which the broadening kernel is evaluated.
If not specified, the mean wavelength of the input will be used.
Output
------
y_rot : convolved flux
'''
if vsini == 0:
y_rot = y
else:
y_rot = pyasl.rotBroad(x, y, epsilon, vsini, edgeHandling='firstlast')
return y_rot
def vmacro_kernel(dlam, Ar, At, Zr, Zt):
'''
Macroturbulent velocity kernel.
'''
dlam[dlam == 0] = 1e-8
if Zr != Zt:
return np.array(
[
(
2
* Ar
* idlam
/ (np.sqrt(np.pi) * Zr ** 2)
* quad(lambda u: np.exp(-1 / u ** 2), 0, Zr / idlam)[0]
+ 2
* At
* idlam
/ (np.sqrt(np.pi) * Zt ** 2)
* quad(lambda u: np.exp(-1 / u ** 2), 0, Zt / idlam)[0]
)
for idlam in dlam
]
)
else:
return np.array(
[
(
2 * Ar * idlam / (np.sqrt(np.pi) * Zr ** 2)
+ 2 * At * idlam / (np.sqrt(np.pi) * Zt ** 2)
)
* quad(lambda u: np.exp(-1 / u ** 2), 0, Zr / idlam)[0]
for idlam in dlam
]
)
def vmac_broadening(wave, flux, vmacro_rad):
'''
Apply macroturbulent broadening.
The macroturbulent kernel is defined as in [Gray2005].
These functions are taken from iSpec (Blanco-Cuaresma et al. 2014)
Input
-----
:parameter wave: Wavelength of the spectrum
:parameter flux: Flux of the spectrum
:parameter vmacro_rad: macroturbulent broadening, radial component
Output
------
y_mac : broadened flux
'''
# radial component is equal to the tangential component
vmacro_tan = vmacro_rad
if vmacro_rad == vmacro_tan == 0:
return flux
# Define central wavelength
lambda0 = (wave[0] + wave[-1]) / 2.0
vmac_rad = vmacro_rad / (299792458.0 * 1e-3) * lambda0
vmac_tan = vmac_rad
# Make sure the wavelength range is equidistant before applying the
# convolution
delta_wave = np.diff(wave).min()
range_wave = wave.ptp()
n_wave = int(range_wave / delta_wave) + 1
wave_ = np.linspace(wave[0], wave[-1], n_wave)
flux_ = np.interp(wave_, wave, flux)
dwave = wave_[1] - wave_[0]
n_kernel = int(5 * max(vmac_rad, vmac_tan) / dwave)
if n_kernel % 2 == 0:
n_kernel += 1
        # The kernel might be of too low resolution, or the wavelength range
# might be too narrow. In both cases, raise an appropriate error
if n_kernel == 0:
raise ValueError(
("Spectrum resolution too low for macroturbulent broadening")
)
elif n_kernel > n_wave:
raise ValueError(
("Spectrum range too narrow for macroturbulent broadening")
)
# Construct the broadening kernel
wave_k = np.arange(n_kernel) * dwave
wave_k -= wave_k[-1] / 2.0
kernel = vmacro_kernel(wave_k, 1.0, 1.0, vmac_rad, vmac_tan)
kernel /= sum(kernel)
flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
# And interpolate the results back on to the original wavelength array,
# taking care of even vs. odd-length kernels
if n_kernel % 2 == 1:
offset = 0.0
else:
offset = dwave / 2.0
flux = np.interp(wave + offset, wave_, 1 - flux_conv)
return flux
# vmac broadening
y_mac = vmac_broadening(x, y, vmacro_rad=vmac)
# vsini broadening
y_rot = vsini_broadening(x, y_mac, epsilon, vsini)
# Instrumental broadening
y_inst = instrumental_profile(x, y_rot, resolution)
return x, y_inst
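# Hypothetical usage of `broadening` on a synthetic Gaussian absorption line
# (wavelengths in Angstrom; the line depth, width and broadening values below
# are illustrative only, not taken from a real spectrum):
def _demo_broadening():
    wave = np.linspace(5000.0, 5010.0, 2000)
    flux = 1.0 - 0.6 * np.exp(-0.5 * ((wave - 5005.0) / 0.05) ** 2)
    x, y = broadening(wave, flux, vsini=3.0, vmac=2.5, resolution=115000)
    return x, y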
def _read_raw_moog(fname='summary.out'):
'''Read the summary.out and return them
Inputs
------
fname : str (default: summary.out)
Filename of the output file from MOOG from summary_out
Output
------
    wavelength : ndarray
    The wavelength vector
flux : ndarray
The flux vector
'''
import itertools
    with open(fname, 'r') as f:
f.readline()
f.readline()
start_wave, end_wave, step, flux_step = list(map(float, f.readline().split()))
lines = f.readlines()
data = []
for line in lines:
line = line.replace('-', ' ')
line = line.replace('\n', '').split(' ')
line = filter(None, line)
data.append(line)
flux = list(itertools.chain(*data))
flux = np.array(flux)
flux = flux.astype(float)
flux = 1.0 - flux
w0, dw, n = float(start_wave), float(step), len(flux)
w = w0 + dw * n
wavelength = np.linspace(w0, w, n, endpoint=False)
return wavelength, flux
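# Hypothetical glue code combining the two helpers above: read the MOOG
# summary output and store it with `save_synth_spec`. The default file name
# is an assumption of this sketch, and save_synth_spec writes into 'results/'.
def _demo_save_from_moog(summary_file='summary.out'):
    wavelength, flux = _read_raw_moog(summary_file)
    save_synth_spec(wavelength, flux, initial=None)
    return wavelength, flux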
def read_linelist(fname, intname='intervals.lst'):
'''Read the line list (atomic data) and the file which includes the ranges
where the synthesis will happen.
Input
-----
fname : str
File that contains the linelist
intname : str
File that contains the intervals
Output
------
ranges : wavelength ranges of the linelist
atomic : atomic data
'''
lines = pd.read_csv(
fname,
skiprows=1,
comment='#',
delimiter='\t',
usecols=range(6),
names=['wl', 'elem', 'excit', 'loggf', 'vdwaals', 'Do'],
converters={
'Do': lambda x: x.replace("nan", " "),
'vdwaals': lambda x: float(x),
},
)
lines.sort_values(by='wl', inplace=True)
intervals = pd.read_csv(
intname, comment='#', names=['start', 'end'], delimiter='\t'
)
ranges = intervals.values
atomic = []
N = []
for i, ri in enumerate(intervals.values):
a = lines[(lines.wl > ri[0]) & (lines.wl < ri[1])]
atomic.append(a.values)
N.append(len(a))
N = sum(N)
atomic = np.vstack(atomic)
print('Linelist contains %s lines in %s intervals' % (N, len(ranges)))
# Create line list for MOOG
fmt = ['%9.3f', '%10.1f', '%9.2f', '%9.3f', '%9.3f', '%7.4s']
header = 'Wavelength ele EP loggf vdwaals Do'
np.savetxt('linelist.moog', atomic, fmt=fmt, header=header)
return ranges, atomic
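# Hypothetical call of `read_linelist` (file names are placeholders for the
# expected tab-separated line list and interval list); note that the call also
# writes 'linelist.moog' for MOOG as a side effect.
def _demo_read_linelist(linelist='linelist.lst', intervals='intervals.lst'):
    ranges, atomic = read_linelist(linelist, intname=intervals)
    return ranges, atomic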
def read_linelist_elem(fname, element=None, intname='intervals_elements.lst'):
'''Read the line list (atomic data) and the file which includes the ranges
where the synthesis will happen for the element abundances.
Input
-----
fname : str
File that contains the linelist
element : str
The element to be searched in the line list
intname : str
File that contains the central line where -+2.0 \AA{} are added to create
the interval around each line.
Output
------
ranges : wavelength ranges of the linelist
atomic : atomic data
'''
if not os.path.isfile(intname):
raise IOError('The interval list is not in the correct place!')
print('Line list:', fname)
print('Intervals list:', intname)
lines = pd.read_csv(
fname,
skiprows=1,
comment='#',
delimiter='\t',
usecols=range(6),
names=['wl', 'elem', 'excit', 'loggf', 'vdwaals', 'Do'],
)
lines.sort_values(by='wl', inplace=True)
intervals = pd.read_csv(
intname,
comment='#',
usecols=(0, 1),
names=['El', 'wave'],
delimiter='\t',
)
try:
intervals['El'] = intervals['El'].map(lambda x: x.strip().strip("I"))
except AttributeError:
print('The format of the line list is not correct.')
intervals = intervals[intervals['El'] == element]
intervals.sort_values(by='wave', inplace=True)
N = []
ranges = []
atomic = | pd.DataFrame([]) | pandas.DataFrame |
print('Chapter 04: Data Preparation')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('setup.py')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BASE_DIR = ".."
def figNum():
figNum.counter += 1
return "{0:02d}".format(figNum.counter)
figNum.counter = 0
FIGPREFIX = 'ch04_fig'
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Additional setup ...')
print("$ 'python -m spacy download en_core_web_sm'")
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('settings.py')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# suppress warnings
import warnings;
warnings.filterwarnings('ignore');
# common imports
import pandas as pd
import numpy as np
import math
import re
import glob
import os
import sys
import json
import random
import pprint as pp
import textwrap
import sqlite3
import logging
import spacy
import nltk
from tqdm.auto import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# pandas display options
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
pd.options.display.max_columns = 30 # default 20
pd.options.display.max_rows = 60 # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200 # default 50; -1 = all
# otherwise text between $ signs will be interpreted as formula and printed in italic
pd.set_option('display.html.use_mathjax', False)
# np.set_printoptions(edgeitems=3) # default 3
import matplotlib
from matplotlib import pyplot as plt
plot_params = {'figure.figsize': (8, 6),
'axes.labelsize': 'small',
'axes.titlesize': 'small',
'xtick.labelsize': 'small',
'ytick.labelsize':'small',
'figure.dpi': 100}
# adjust matplotlib defaults
matplotlib.rcParams.update(plot_params)
import seaborn as sns
sns.set_style("darkgrid")
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Loading Data into Pandas')
import pandas as pd
posts_file = "rspct.tsv.gz"
posts_file = f"{BASE_DIR}/data/reddit-selfposts/rspct_autos.tsv.gz" ### real location
posts_df = pd.read_csv(posts_file, sep='\t')
subred_file = "subreddit_info.csv.gz"
subred_file = f"{BASE_DIR}/data/reddit-selfposts/subreddit_info.csv.gz" ### real location
subred_df = pd.read_csv(subred_file).set_index(['subreddit'])
df = posts_df.join(subred_df, on='subreddit')
len(df)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Standardizing Attribute Names')
print(df.columns)
column_mapping = {
'id': 'id',
'subreddit': 'subreddit',
'title': 'title',
'selftext': 'text',
'category_1': 'category',
'category_2': 'subcategory',
'category_3': None, # no data
'in_data': None, # not needed
'reason_for_exclusion': None # not needed
}
# define remaining columns
columns = [c for c in column_mapping.keys() if column_mapping[c] != None]
# select and rename those columns
df = df[columns].rename(columns=column_mapping)
df = df[df['category'] == 'autos']
len(df)
pd.options.display.max_colwidth = None
print(df.sample(1, random_state=7).T)
pd.options.display.max_colwidth = 200
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Saving and Loading a Data Frame')
df.to_pickle("reddit_dataframe.pkl")
import sqlite3
db_name = "reddit-selfposts.db"
con = sqlite3.connect(db_name)
df.to_sql("posts", con, index=False, if_exists="replace")
con.close()
con = sqlite3.connect(db_name)
df = pd.read_sql("select * from posts", con)
con.close()
len(df)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Cleaning Text Data')
text = """
After viewing the [PINKIEPOOL Trailer](https://www.youtu.be/watch?v=ieHRoHUg)
it got me thinking about the best match ups.
<lb>Here's my take:<lb><lb>[](/sp)[](/ppseesyou) Deadpool<lb>[](/sp)[](/ajsly)
Captain America<lb>"""
print(text)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Identify Noise with Regular Expressions')
import re
RE_SUSPICIOUS = re.compile(r'[&#<>{}\[\]\\]')
def impurity(text, min_len=10):
"""returns the share of suspicious characters in a text"""
if text == None or len(text) < min_len:
return 0
else:
return len(RE_SUSPICIOUS.findall(text))/len(text)
print(impurity(text))
pd.options.display.max_colwidth = 100
# add new column to data frame
df['impurity'] = df['text'].progress_apply(impurity, min_len=10)
# get the top 3 records
print(df[['text', 'impurity']].sort_values(by='impurity', ascending=False).head(3))
pd.options.display.max_colwidth = 200
from blueprints.exploration import count_words
print(count_words(df, column='text', preprocess=lambda t: re.findall(r'<[\w/]*>', t)))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Noise Removal with Regular Expressions')
import html
def clean(text):
# convert html escapes like & to characters.
text = html.unescape(text)
# tags like <tab>
text = re.sub(r'<[^<>]*>', ' ', text)
# markdown URLs like [Some text](https://....)
text = re.sub(r'\[([^\[\]]*)\]\([^\(\)]*\)', r'\1', text)
# text or code in brackets like [0]
text = re.sub(r'\[[^\[\]]*\]', ' ', text)
# standalone sequences of specials, matches &# but not #cool
text = re.sub(r'(?:^|\s)[&#<>{}\[\]+|\\:-]{1,}(?:\s|$)', ' ', text)
# standalone sequences of hyphens like --- or ==
text = re.sub(r'(?:^|\s)[\-=\+]{2,}(?:\s|$)', ' ', text)
# sequences of white spaces
text = re.sub(r'\s+', ' ', text)
return text.strip()
clean_text = clean(text)
print(clean_text)
print("Impurity:", impurity(clean_text))
df['clean_text'] = df['text'].progress_map(clean)
df['impurity'] = df['clean_text'].apply(impurity, min_len=20)
print(df[['clean_text', 'impurity']].sort_values(by='impurity', ascending=False).head(3))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Character Normalization with textacy')
text = '"The café “Saint-Raphaël” is loca-\nted on Côte dʼAzur.'
import textacy.preprocessing as tprep
def normalize(text):
text = tprep.normalize_hyphenated_words(text)
text = tprep.normalize_quotation_marks(text)
text = tprep.normalize_unicode(text)
text = tprep.remove_accents(text)
return text
print(normalize(text))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Pattern-based Data Masking with textacy')
from textacy.preprocessing.resources import RE_URL
print(count_words(df, column='clean_text', preprocess=RE_URL.findall).head(3))
from textacy.preprocessing.replace import replace_urls
text = "Check out https://spacy.io/usage/spacy-101"
# using default substitution _URL_
print(replace_urls(text))
df['clean_text'] = df['clean_text'].progress_map(replace_urls)
df['clean_text'] = df['clean_text'].progress_map(normalize)
df.rename(columns={'text': 'raw_text', 'clean_text': 'text'}, inplace=True)
df.drop(columns=['impurity'], inplace=True)
con = sqlite3.connect(db_name)
df.to_sql("posts_cleaned", con, index=False, if_exists="replace")
con.close()
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Tokenization')
text = """
2019-08-10 23:32: @pete/@louis - I don't have a well-designed
solution for today's problem. The code of module AC68 should be -1.
Have to think a bit... #goodnight ;-) 😩😬"""
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Tokenization with Regular Expressions')
tokens = re.findall(r'\w\w+', text)
print(*tokens, sep='|')
RE_TOKEN = re.compile(r"""
    ( [#]?[@\w'’\.\-\:]*\w # words, hash tags and email addresses
| [:;<]\-?[\)\(3] # coarse pattern for basic text emojis
| [\U0001F100-\U0001FFFF] # coarse code range for unicode emojis
)
""", re.VERBOSE)
def tokenize(text):
return RE_TOKEN.findall(text)
tokens = tokenize(text)
print(*tokens, sep='|')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Tokenization with NLTK')
import nltk
nltk.download('punkt')
tokens = nltk.tokenize.word_tokenize(text)
print(*tokens, sep='|')
# Not in book: Regex Tokenizer
tokenizer = nltk.tokenize.RegexpTokenizer(RE_TOKEN.pattern, flags=re.VERBOSE)
tokens = tokenizer.tokenize(text)
print(*tokens, sep='|')
# Not in book: Tweet Tokenizer
tokenizer = nltk.tokenize.TweetTokenizer()
tokens = tokenizer.tokenize(text)
print(*tokens, sep='|')
# Not in book: Toktok Tokenizer
tokenizer = nltk.tokenize.ToktokTokenizer()
tokens = tokenizer.tokenize(text)
print(*tokens, sep='|')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Recommendations for Tokenization')
print('Linguistic Processing with spaCy')
print('Instantiating a Pipeline')
import spacy
nlp = spacy.load('en_core_web_sm')
print(nlp.pipeline)
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Processing Text')
nlp = spacy.load("en_core_web_sm")
text = "My best friend <NAME> likes fancy adventure games."
doc = nlp(text)
for token in doc:
print(token, end="|")
def display_nlp(doc, include_punct=False):
"""Generate data frame for visualization of spaCy tokens."""
rows = []
for i, t in enumerate(doc):
if not t.is_punct or include_punct:
row = {'token': i, 'text': t.text, 'lemma_': t.lemma_,
'is_stop': t.is_stop, 'is_alpha': t.is_alpha,
'pos_': t.pos_, 'dep_': t.dep_,
'ent_type_': t.ent_type_, 'ent_iob_': t.ent_iob_}
rows.append(row)
df = pd.DataFrame(rows).set_index('token')
df.index.name = None
return df
print(display_nlp(doc))
print('\n\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Customizing Tokenization')
text = "@Pete: choose low-carb #food #eat-smart. _url_ ;-) 😋👍"
nlp = spacy.load('en_core_web_sm') ###
doc = nlp(text)
for token in doc:
print(token, end="|")
print('\n')
import re ###
import spacy ###
from spacy.tokenizer import Tokenizer
from spacy.util import compile_prefix_regex, \
compile_infix_regex, compile_suffix_regex
def custom_tokenizer(nlp):
# use default patterns except the ones matched by re.search
prefixes = [pattern for pattern in nlp.Defaults.prefixes
if pattern not in ['-', '_', '#']]
suffixes = [pattern for pattern in nlp.Defaults.suffixes
if pattern not in ['_']]
infixes = [pattern for pattern in nlp.Defaults.infixes
if not re.search(pattern, 'xx-xx')]
return Tokenizer(vocab=nlp.vocab,
rules=nlp.Defaults.tokenizer_exceptions,
prefix_search=compile_prefix_regex(prefixes).search,
suffix_search=compile_suffix_regex(suffixes).search,
infix_finditer=compile_infix_regex(infixes).finditer,
token_match=nlp.Defaults.token_match)
nlp = spacy.load('en_core_web_sm')
nlp.tokenizer = custom_tokenizer(nlp)
doc = nlp(text)
for token in doc:
print(token, end="|")
print('\n\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Working with Stop Words')
nlp = spacy.load('en_core_web_sm')
text = "Dear Ryan, we need to sit down and talk. Regards, Pete"
doc = nlp(text)
non_stop = [t for t in doc if not t.is_stop and not t.is_punct]
print(non_stop)
nlp = spacy.load('en_core_web_sm')
nlp.vocab['down'].is_stop = False
nlp.vocab['Dear'].is_stop = True
nlp.vocab['Regards'].is_stop = True
"""
Not in book: Modifying stop words with a language subclass
Modifying the stop word by changing the vocabulary will probably become
deprecated with spaCy 3.0. Instead it is recommended to create a subclass of the
respective language like this:
"""
# not in book: subclass approach to modify stop word lists
# recommended from spaCy version 3.0 onwards
from spacy.lang.en import English
excluded_stop_words = {'down'}
included_stop_words = {'dear', 'regards'}
class CustomEnglishDefaults(English.Defaults):
stop_words = English.Defaults.stop_words.copy()
stop_words -= excluded_stop_words
stop_words |= included_stop_words
class CustomEnglish(English):
Defaults = CustomEnglishDefaults
nlp = CustomEnglish()
text = "Dear Ryan, we need to sit down and talk. Regards, Pete"
doc = nlp.make_doc(text) # only tokenize
tokens_wo_stop = [token for token in doc]
for token in doc:
if not token.is_stop and not token.is_punct:
print(token, end='|')
print('\n')
# reset nlp to original
nlp = spacy.load('en_core_web_sm')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting Lemmas based on Part-of-Speech')
text = "My best friend <NAME> likes fancy adventure games."
doc = nlp(text)
print(*[t.lemma_ for t in doc], sep='|')
text = "My best friend <NAME> likes fancy adventure games."
doc = nlp(text)
nouns = [t for t in doc if t.pos_ in ['NOUN', 'PROPN']]
print(nouns)
import textacy
tokens = textacy.extract.words(doc,
filter_stops = True, # default True, no stopwords
filter_punct = True, # default True, no punctuation
filter_nums = True, # default False, no numbers
include_pos = ['ADJ', 'NOUN'], # default None = include all
exclude_pos = None, # default None = exclude none
min_freq = 1) # minimum frequency of words
print(*[t for t in tokens], sep='|')
def extract_lemmas(doc, **kwargs):
return [t.lemma_ for t in textacy.extract.words(doc, **kwargs)]
lemmas = extract_lemmas(doc, include_pos=['ADJ', 'NOUN'])
print(*lemmas, sep='|')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting Noun Phrases')
text = "My best friend <NAME> likes fancy adventure games."
doc = nlp(text)
patterns = ["POS:ADJ POS:NOUN:+"]
spans = textacy.extract.matches(doc, patterns=patterns)
print(*[s.lemma_ for s in spans], sep='|')
print(*doc.noun_chunks, sep='|')
def extract_noun_phrases(doc, preceding_pos=['NOUN'], sep='_'):
patterns = []
for pos in preceding_pos:
patterns.append(f"POS:{pos} POS:NOUN:+")
spans = textacy.extract.matches(doc, patterns=patterns)
return [sep.join([t.lemma_ for t in s]) for s in spans]
print(*extract_noun_phrases(doc, ['ADJ', 'NOUN']), sep='|')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting Named Entities')
text = "<NAME>, chairman of World Cargo Inc, lives in San Francisco."
doc = nlp(text)
for ent in doc.ents:
print(f"({ent.text}, {ent.label_})", end=" ")
from spacy import displacy
displacy.render(doc, style='ent', jupyter=False)
def extract_entities(doc, include_types=None, sep='_'):
ents = textacy.extract.entities(doc,
include_types=include_types,
exclude_types=None,
drop_determiners=True,
min_freq=1)
return [sep.join([t.lemma_ for t in e]) + '/' + e.label_ for e in ents]
print(extract_entities(doc, ['PERSON', 'GPE']))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Feature Extraction on a Large Dataset')
print('Blueprint: One Function to Get It All')
def extract_nlp(doc):
return {
'lemmas' : extract_lemmas(doc,
exclude_pos = ['PART', 'PUNCT',
'DET', 'PRON', 'SYM', 'SPACE'],
filter_stops = False),
'adjs_verbs' : extract_lemmas(doc, include_pos = ['ADJ', 'VERB']),
'nouns' : extract_lemmas(doc, include_pos = ['NOUN', 'PROPN']),
'noun_phrases' : extract_noun_phrases(doc, ['NOUN']),
'adj_noun_phrases': extract_noun_phrases(doc, ['ADJ']),
'entities' : extract_entities(doc, ['PERSON', 'ORG', 'GPE', 'LOC'])
}
nlp = spacy.load('en_core_web_sm')
text = "My best friend <NAME> likes fancy adventure games."
doc = nlp(text)
for col, values in extract_nlp(doc).items():
print(f"{col}: {values}")
nlp_columns = list(extract_nlp(nlp.make_doc('')).keys())
print(nlp_columns)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Using spaCy on a Large Data Set')
import sqlite3
db_name = "reddit-selfposts.db"
con = sqlite3.connect(db_name)
df = pd.read_sql("select * from posts_cleaned", con)
con.close()
df['text'] = df['title'] + ': ' + df['text']
for col in nlp_columns:
df[col] = None
"""
On Colab: Choose "Runtime"→"Change Runtime Type"→"GPU" to benefit from the GPUs.
"""
if spacy.prefer_gpu():
print("Working on GPU.")
else:
print("No GPU found, working on CPU.")
nlp = spacy.load('en_core_web_sm', disable=[])
nlp.tokenizer = custom_tokenizer(nlp) # optional
"""
full data set takes about 6-8 minutes
for faster processing use a sample like this
df = df.sample(500)
"""
batch_size = 50
batches = math.ceil(len(df) / batch_size) ###
for i in tqdm(range(0, len(df), batch_size), total=batches):
docs = nlp.pipe(df['text'][i:i + batch_size])
for j, doc in enumerate(docs):
for col, values in extract_nlp(doc).items():
df[col].iloc[i + j] = values
print(df[['text', 'lemmas', 'nouns', 'noun_phrases', 'entities']].sample(5))
df_plt = count_words(df, 'noun_phrases').head(10).plot(kind='barh', figsize=(8,3)).invert_yaxis()
plt.tight_layout()
plt.savefig('ch04_fig01_token_freq_hbar.png')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Persisting the Result')
df[nlp_columns] = df[nlp_columns].applymap(lambda items: ' '.join(items))
con = sqlite3.connect(db_name)
df.to_sql("posts_nlp", con, index=False, if_exists="replace")
con.close()
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('A Note on Execution Time')
print('There is More')
print('Language Detection')
print('Additional Blueprint (not in book): Language Detection with fastText')
"""
There are different trained models available on the fastText website. We will be using the smaller
model lid.176.ftz which has a size of less than 1 MB and is almost as accurate as the large model
with 126MB. See https://fasttext.cc/docs/en/language-identification.html for instructions.
"""
# download model
os.system('wget https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz')
import fasttext
lang_model = fasttext.load_model("lid.176.ftz")
# make a prediction
print(lang_model.predict('"Good morning" in German is "Guten Morgen"', 3))
"""
The predict function takes a Unicode string as its first argument. The second, optional parameter
k specifies that we want the k language labels with the highest probabilities.
The model returns labels in the form __label__<code>, where code is the ISO 639 language
code<footnote>See https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for a complete list.
</footnote> and probabilities for each label.
Let's wrap the language identification into a preprocessing function. The function returns the detected
language only if the calculated probability is higher than the specified threshold, otherwise, it returns
the default language. This is useful for corpora like the hacker news, which is basically an English
corpus with some utterances from other languages.
"""
def predict_language(text, threshold=0.8, default='en'):
# skip language detection for very short texts
if len(text) < 20:
return default
# fasttext requires single line input
text = text.replace('\n', ' ')
labels, probas = lang_model.predict(text)
lang = labels[0].replace("__label__", "")
proba = probas[0]
if proba < threshold:
return default
else:
return lang
"""
The prediction function can now easily be applied to a data frame to identify the language of each
document.
"""
data = ["I don't like version 2.0 of Chat4you 😡👎", # English
"Ich mag Version 2.0 von Chat4you nicht 😡👎", # German
"Мне не нравится версия 2.0 Chat4you 😡👎", # Russian
"Não gosto da versão 2.0 do Chat4you 😡👎", # Portugese
"मुझे Chat4you का संस्करण 2.0 पसंद नहीं है 😡👎"] # Hindi
demo_df = | pd.Series(data, name='text') | pandas.Series |
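# Applying the detector to the demo series, as described above (a sketch of
# the intended step; each row should come back as an ISO 639-1 code such as
# 'en', 'de', 'ru', 'pt', 'hi'):
print(demo_df.apply(predict_language))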
import ast
import os
import re
import uuid
import pandas as pd
import configuration as cf
from guesslang import Guess
from pydriller import Repository
from utils import log_commit_urls
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
fixes_columns = [
'cve_id',
'hash',
'repo_url',
]
commit_columns = [
'hash',
'repo_url',
'author',
'author_date',
'author_timezone',
'committer',
'committer_date',
'committer_timezone',
'msg',
'merge',
'parents',
'num_lines_added',
'num_lines_deleted',
'dmm_unit_complexity',
'dmm_unit_interfacing',
'dmm_unit_size'
]
file_columns = [
'file_change_id',
'hash',
'filename',
'old_path',
'new_path',
'change_type',
'diff',
'diff_parsed',
'num_lines_added',
'num_lines_deleted',
'code_after',
'code_before',
'nloc',
'complexity',
'token_count',
'programming_language'
]
method_columns = [
'method_change_id',
'file_change_id',
'name',
'signature',
'parameters',
'start_line',
'end_line',
'code',
'nloc',
'complexity',
'token_count',
'top_nesting_level',
'before_change',
]
def extract_project_links(df_master):
"""
extracts all the reference urls from CVE records that match to the repo commit urls
"""
df_fixes = pd.DataFrame(columns=fixes_columns)
git_url = r'(((?P<repo>(https|http):\/\/(bitbucket|github|gitlab)\.(org|com)\/(?P<owner>[^\/]+)\/(?P<project>[^\/]*))\/(commit|commits)\/(?P<hash>\w+)#?)+)'
cf.logger.info('-' * 70)
cf.logger.info('Extracting all the reference urls from CVE...')
for i in range(len(df_master)):
ref_list = ast.literal_eval(df_master['reference_json'].iloc[i])
if len(ref_list) > 0:
for ref in ref_list:
url = dict(ref)['url']
link = re.search(git_url, url)
if link:
row = {
'cve_id': df_master['cve_id'][i],
'hash': link.group('hash'),
'repo_url': link.group('repo').replace(r'http:', r'https:')
}
df_fixes = df_fixes.append(pd.Series(row), ignore_index=True)
df_fixes = df_fixes.drop_duplicates().reset_index(drop=True)
    cf.logger.info('Number of collected references to vulnerability fixing commits: %s', len(df_fixes))
return df_fixes
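# Hypothetical smoke test for extract_project_links: a single fabricated CVE
# record (the CVE id, repository and hash below are made up) should yield one
# row with the parsed repo_url and commit hash.
def _demo_extract_project_links():
    df_master = pd.DataFrame({
        'cve_id': ['CVE-0000-0001'],
        'reference_json': [
            "[{'url': 'https://github.com/example/project/commit/abcdef1234567890'}]"
        ],
    })
    return extract_project_links(df_master)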
def guess_pl(code):
"""
:returns guessed programming language of the code
"""
if code:
return Guess().language_name(code.strip())
else:
return 'unknown'
def clean_string(signature):
return signature.strip().replace(' ','')
def get_method_code(source_code, start_line, end_line):
try:
if source_code is not None:
code = ('\n'.join(source_code.split('\n')[int(start_line) - 1: int(end_line)]))
return code
else:
return None
except Exception as e:
        cf.logger.warning('Problem while getting method code from the file! %s', e)
pass
def changed_methods_both(file):
"""
Return the list of methods that were changed.
:return: list of methods
"""
new_methods = file.methods
old_methods = file.methods_before
added = file.diff_parsed["added"]
deleted = file.diff_parsed["deleted"]
methods_changed_new = {
y
for x in added
for y in new_methods
if y.start_line <= x[0] <= y.end_line
}
methods_changed_old = {
y
for x in deleted
for y in old_methods
if y.start_line <= x[0] <= y.end_line
}
return methods_changed_new, methods_changed_old
# --------------------------------------------------------------------------------------------------------
# extracting method_change data
def get_methods(file, file_change_id):
"""
returns the list of methods in the file.
"""
file_methods = []
try:
if file.changed_methods:
cf.logger.debug('-' * 70)
cf.logger.debug('\nmethods_after: ')
cf.logger.debug('- ' * 35)
for m in file.methods:
if m.name != '(anonymous)':
cf.logger.debug(m.long_name)
cf.logger.debug('\nmethods_before: ')
cf.logger.debug('- ' * 35)
for mb in file.methods_before:
if mb.name != '(anonymous)':
cf.logger.debug(mb.long_name)
cf.logger.debug('\nchanged_methods: ')
cf.logger.debug('- ' * 35)
for mc in file.changed_methods:
if mc.name != '(anonymous)':
cf.logger.debug(mc.long_name)
cf.logger.debug('-' * 70)
# for mb in file.methods_before:
# for mc in file.changed_methods:
# #if mc.name == mb.name and mc.name != '(anonymous)':
# if clean_string(mc.long_name) == clean_string(mb.long_name) and mc.name != '(anonymous)':
if file.changed_methods:
methods_after, methods_before = changed_methods_both(file) # modified methods in source_code_after/_before
if methods_before:
for mb in methods_before:
                    # filter out non-existent code and '(anonymous)' entries,
                    # because the lizard API classifies those fragments as functions
                    # even though manual checks showed that '(anonymous)' entries are
                    # not real function code; they are also not listed in the changed functions.
if file.source_code_before is not None and mb.name != '(anonymous)':
# method_before_code = ('\n'.join(file.source_code_before.split('\n')[int(mb.start_line) - 1: int(mb.end_line)]))
method_before_code = get_method_code(file.source_code_before, mb.start_line, mb.end_line)
method_before_row = {
'method_change_id': uuid.uuid4().fields[-1],
'file_change_id': file_change_id,
'name': mb.name,
'signature': mb.long_name,
'parameters': mb.parameters,
'start_line': mb.start_line,
'end_line': mb.end_line,
'code': method_before_code,
'nloc': mb.nloc,
'complexity': mb.complexity,
'token_count': mb.token_count,
'top_nesting_level': mb.top_nesting_level,
'before_change': 'True',
}
file_methods.append(method_before_row)
if methods_after:
for mc in methods_after:
if file.source_code is not None and mc.name != '(anonymous)':
# changed_method_code = ('\n'.join(file.source_code.split('\n')[int(mc.start_line) - 1: int(mc.end_line)]))
changed_method_code = get_method_code(file.source_code, mc.start_line, mc.end_line)
changed_method_row = {
'method_change_id': uuid.uuid4().fields[-1],
'file_change_id': file_change_id,
'name': mc.name,
'signature': mc.long_name,
'parameters': mc.parameters,
'start_line': mc.start_line,
'end_line': mc.end_line,
'code': changed_method_code,
'nloc': mc.nloc,
'complexity': mc.complexity,
'token_count': mc.token_count,
'top_nesting_level': mc.top_nesting_level,
'before_change': 'False',
}
file_methods.append(changed_method_row)
if file_methods:
return file_methods
else:
return None
except Exception as e:
        cf.logger.warning('Problem while fetching the methods! %s', e)
pass
# ---------------------------------------------------------------------------------------------------------
# extracting file_change data of each commit
def get_files(commit):
"""
returns the list of files of the commit.
"""
commit_files = []
commit_methods = []
try:
cf.logger.info(f'Extracting files for {commit.hash}')
if commit.modified_files:
for file in commit.modified_files:
cf.logger.debug(f'Processing file {file.filename} in {commit.hash}')
                # programming_language = (file.filename.rsplit('.')[-1] if '.' in file.filename else None)
programming_language = guess_pl(file.source_code) # guessing the programming language of fixed code
file_change_id = uuid.uuid4().fields[-1]
file_row = {
'file_change_id': file_change_id, # filename: primary key
'hash': commit.hash, # hash: foreign key
'filename': file.filename,
'old_path': file.old_path,
'new_path': file.new_path,
'change_type': file.change_type, # i.e. added, deleted, modified or renamed
'diff': file.diff, # diff of the file as git presents it (e.g. @@xx.. @@)
'diff_parsed': file.diff_parsed, # diff parsed in a dict containing added and deleted lines lines
'num_lines_added': file.added_lines, # number of lines added
'num_lines_deleted': file.deleted_lines, # number of lines removed
'code_after': file.source_code,
'code_before': file.source_code_before,
'nloc': file.nloc,
'complexity': file.complexity,
'token_count': file.token_count,
'programming_language': programming_language,
}
file_methods = []
commit_files.append(file_row)
file_methods = get_methods(file, file_change_id)
if file_methods is not None:
commit_methods.extend(file_methods)
else:
cf.logger.info('The list of modified_files is empty')
return commit_files, commit_methods
except Exception as e:
        cf.logger.warning('Problem while fetching the files! %s', e)
pass
def extract_commits(repo_url, hashes):
"""This function extract git commit information of only the hashes list that were specified in the
commit URL. All the commit_fields of the corresponding commit have been obtained.
Every git commit hash can be associated with one or more modified/manipulated files.
One vulnerability with same hash can be fixed in multiple files so we have created a dataset of modified files
as 'df_file' of a project.
:param repo_url: list of url links of all the projects.
:param hashes: list of hashes of the commits to collect
:return dataframes: at commit level and file level.
"""
repo_commits = []
repo_files = []
repo_methods = []
# ----------------------------------------------------------------------------------------------------------------
# extracting commit-level data
if 'github' in repo_url:
repo_url = repo_url + '.git'
cf.logger.debug(f'Extracting commits for {repo_url} with {cf.NUM_WORKERS} worker(s) looking for the following hashes:')
log_commit_urls(repo_url, hashes)
# giving first priority to 'single' parameter for single hash because
# it has been tested that 'single' gets commit information in some cases where 'only_commits' does not,
# for example: https://github.com/hedgedoc/hedgedoc.git/35b0d39a12aa35f27fba8c1f50b1886706e7efef
single_hash = None
if len(hashes) == 1:
single_hash = hashes[0]
hashes = None
for commit in Repository(path_to_repo=repo_url,
only_commits=hashes,
single=single_hash,
num_workers=cf.NUM_WORKERS).traverse_commits():
cf.logger.debug(f'Processing {commit.hash}')
try:
commit_row = {
'hash': commit.hash,
'repo_url': repo_url,
'author': commit.author.name,
'author_date': commit.author_date,
'author_timezone': commit.author_timezone,
'committer': commit.committer.name,
'committer_date': commit.committer_date,
'committer_timezone': commit.committer_timezone,
'msg': commit.msg,
'merge': commit.merge,
'parents': commit.parents,
'num_lines_added': commit.insertions,
'num_lines_deleted': commit.deletions,
'dmm_unit_complexity': commit.dmm_unit_complexity,
'dmm_unit_interfacing': commit.dmm_unit_interfacing,
'dmm_unit_size': commit.dmm_unit_size,
}
commit_files, commit_methods = get_files(commit)
repo_commits.append(commit_row)
repo_files.extend(commit_files)
repo_methods.extend(commit_methods)
except Exception as e:
            cf.logger.warning('Problem while fetching the commits! %s', e)
pass
if repo_commits:
df_repo_commits = pd.DataFrame.from_dict(repo_commits)
df_repo_commits = df_repo_commits[commit_columns] # ordering the columns
else:
df_repo_commits = None
if repo_files:
df_repo_files = pd.DataFrame.from_dict(repo_files)
df_repo_files = df_repo_files[file_columns] # ordering the columns
else:
df_repo_files = None
if repo_methods:
df_repo_methods = | pd.DataFrame.from_dict(repo_methods) | pandas.DataFrame.from_dict |
from collections import OrderedDict
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.core.construction import create_series_with_explicit_dtype
class TestFromDict:
# Note: these tests are specific to the from_dict method, not for
# passing dictionaries to DataFrame.__init__
def test_from_dict_scalars_requires_index(self):
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
result = DataFrame(data)
expected = DataFrame.from_dict(
dict(zip(range(len(data)), data)), orient="index"
)
tm.assert_frame_equal(result, expected.reindex(result.index))
def test_constructor_single_row(self):
data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
result.index
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(["x", "y"], data))
idx = Index(["a", "b", "c"])
# all named
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx, name="y"),
]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx),
]
result = DataFrame(data2)
sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# none named
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
data = [
create_series_with_explicit_dtype(d, dtype_if_empty=object) for d in data
]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([ | Series(dtype=object) | pandas.Series |
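# Hedged illustration (not one of the tests above): a small sketch of the
# behaviour these tests exercise. With orient="index" each inner dict becomes a
# row, while the default orient="columns" treats each top-level key as a column.
def _example_from_dict_orients():
    import pandas as pd
    data = {"x": {"a": 1.5, "b": 3}, "y": {"a": 2.5, "b": 4}}
    by_rows = pd.DataFrame.from_dict(data, orient="index")   # index: x, y
    by_columns = pd.DataFrame.from_dict(data)                 # columns: x, y
    return by_rows, by_columns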
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
def read_meta(meta_csv):
df = pd.read_csv(meta_csv, sep=',')
df = pd.DataFrame(df)
audio_names = []
set_categories = []
cycle_labels = []
for row in df.iterrows():
audio_name = row[1]['audio_name']
set_category = row[1]['set_category']
cycle_label = row[1]['cycle_label']
audio_names.append(audio_name)
set_categories.append(set_category)
cycle_labels.append(cycle_label)
return audio_names, set_categories, cycle_labels
def datasplit(meta_dir):
"""
Split data into train and dev (test set is already defined in ICBHI)
Args:
meta_dir: meta data folder
Returns:
train.csv, dev.csv, test.csv, traindev.csv
"""
audio_names, set_categories, cycle_labels = read_meta(os.path.join(meta_dir, 'meta.csv'))
audio_traindev = []
audio_test = []
label_traindev = []
label_test = []
subjectid_traindev = []
subjectid_test = []
for i in range(0, len(audio_names)):
if set_categories[i] == 'train':
audio_traindev.append(audio_names[i])
label_traindev.append(cycle_labels[i])
subjectid_traindev.append(audio_names[i].split('_')[0])
elif set_categories[i] == 'test':
audio_test.append(audio_names[i])
label_test.append(cycle_labels[i])
subjectid_test.append(audio_names[i].split('_')[0])
else:
print('Wrong set category!')
subid_traindev_unique, subid_traindev_unique_ind = np.unique(subjectid_traindev, return_index=True)
subid_traindev_unique_label = np.array(label_traindev)[subid_traindev_unique_ind]
'''
labels, counts = np.unique(subid_traindev_unique_label, return_counts=True)
print(labels)
print(counts)
'''
stratSplit = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=12)
for train_idx, test_idx in stratSplit.split(subid_traindev_unique, subid_traindev_unique_label):
subid_train_unique, subid_train_unique_label = subid_traindev_unique[train_idx], subid_traindev_unique_label[train_idx]
subid_dev_unique, subid_dev_unique_label = subid_traindev_unique[test_idx], subid_traindev_unique_label[test_idx]
'''
labels, counts = np.unique(subid_train_unique_label, return_counts=True)
print(labels)
print(counts)
labels, counts = np.unique(subid_dev_unique_label, return_counts=True)
print(labels)
print(counts)
'''
audio_train = []
audio_dev = []
label_train = []
label_dev = []
for i in range(0, len(subid_train_unique)):
ind = np.argwhere(np.array(subjectid_traindev) == subid_train_unique[i])
ind = [j[0] for j in ind]
audio_train.extend(np.array(audio_traindev)[ind])
label_train.extend(np.array(label_traindev)[ind])
for i in range(0, len(subid_dev_unique)):
ind = np.argwhere(np.array(subjectid_traindev) == subid_dev_unique[i])
ind = [j[0] for j in ind]
audio_dev.extend(np.array(audio_traindev)[ind])
label_dev.extend(np.array(label_traindev)[ind])
df = pd.DataFrame(data={'audio_name': audio_train, 'cycle_label': label_train})
df.to_csv(os.path.join(meta_dir, 'meta_train.csv'), index=False)
df = pd.DataFrame(data={'audio_name': audio_dev, 'cycle_label': label_dev})
df.to_csv(os.path.join(meta_dir, 'meta_dev.csv'), index=False)
df = | pd.DataFrame(data={'audio_name': audio_test, 'cycle_label': label_test}) | pandas.DataFrame |
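# Hedged illustration (not part of the script above): the key idea of datasplit()
# in a compact form. The split is done on unique subject IDs, stratified by label,
# so no subject appears in both train and dev. Inputs here are illustrative; the
# real script derives them from meta.csv.
def _example_subject_level_split(subject_ids, subject_labels, seed=12):
    import numpy as np
    from sklearn.model_selection import StratifiedShuffleSplit
    subject_ids = np.asarray(subject_ids)
    subject_labels = np.asarray(subject_labels)
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=seed)
    train_idx, dev_idx = next(splitter.split(subject_ids, subject_labels))
    return subject_ids[train_idx], subject_ids[dev_idx]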
""" self-contained to write legacy pickle files """
from __future__ import print_function
def _create_sp_series():
import numpy as np
from pandas import SparseSeries
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = 'bseries'
return bseries
def _create_sp_tsseries():
import numpy as np
from pandas import bdate_range, SparseTimeSeries
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(index))
bseries = | SparseTimeSeries(arr, index=date_index, kind='block') | pandas.SparseTimeSeries |
"""This module contains classes and functions specific to SAMPL6 data files"""
import pandas as pd
import numpy as np
from titrato.titrato import TitrationCurve, free_energy_from_population
from titrato.titrato import data_dir
from titrato.stats import (
area_between_curves,
BootstrapDistribution,
array_rmse,
PearsonRBootstrapDistribution,
)
from scipy.stats import pearsonr
from networkx import DiGraph
import warnings
import os
import logging
from typing import List, Tuple, Dict, Callable, Union, Optional, Any
sampl6_charges_file = os.path.join(data_dir, "SAMPL6_microstate_charges.csv")
def read_epik_formal_charge(prop_csvfile: str) -> int:
"""
Read the formal charge at pH 7.0 from an epik sequential run from an atom props file.
"""
df = pd.read_csv(prop_csvfile, header=0)
return int(df["i_m_formal_charge"].sum())
def get_typei_pka_data(
molecule_name: str, datafile: str, header: Optional[int] = 0
) -> pd.DataFrame:
"""Retrieve type I pka data for a single molecule from the datafile.
Parameters
----------
molecule_name - SAMPL6 identifier of the molecule.
datafile - location of csv file in type I format (micropKa)
header - optional, which lines are header lines, set to None for file without headers
Returns
-------
graph of states connected by pKa, dataframe of all pKa values.
"""
df = pd.read_csv(datafile, header=header)
# Override column names
df.columns = ["Protonated", "Deprotonated", "pKa", "SEM"]
df["Molecule"] = df["Protonated"].apply(lambda string: string.split("_")[0])
mol_frame = df[df["Molecule"] == molecule_name]
return mol_frame
def create_graph_from_typei_df(mol_frame):
"""Create a graph from a typei dataframe for a single molecule."""
# Direction of edges of the graph is deprotonated -> protonated state
from_list = list(mol_frame["Deprotonated"])
to_list = list(mol_frame["Protonated"])
# Add properties
properties = [
dict(pKa=row["pKa"], SEM=row["SEM"]) for i, row in mol_frame.iterrows()
]
graph = DiGraph()
graph.add_edges_from(zip(from_list, to_list, properties))
return graph
def get_typeii_logp_data(
molecule_name: str, datafile, header: Optional[int] = 0
) -> pd.DataFrame:
"""Retrieve type II log population data for a single molecule from the datafile.
Parameters
----------
molecule_name - SAMPL6 identifier of the molecule.
datafile - location of csv file in type II format (microstate log populations)
header - optional, which lines are header lines, set to None for file without headers
Returns
-------
Dataframe with populations, dataframe with charges
"""
df = pd.read_csv(datafile, header=header)
colnames = list(df.columns)
colnames[0] = "Microstate ID"
df.columns = colnames
df["Molecule"] = df["Microstate ID"].apply(lambda id: id.split("_")[0])
return df[df["Molecule"] == molecule_name]
def get_typeiii_pka_data(molecule_name: str, datafile: str, header: Optional[int] = 0):
"""Retrieve type III macroscopic pKa data for a single molecule from the data file
Parameters
----------
molecule_name - SAMPL6 identifier of the molecule.
datafile - location of csv file in type III format (macropKa)
header - optional, which lines are header lines, set to None for file without headers
Returns
-------
graph of states connected by pKa, dataframe of all pKa values. """
df = pd.read_csv(datafile, header=header)
# Override column names
df.columns = ["Molecule", "pKa", "SEM"]
return df[df["Molecule"] == molecule_name]
def species_by_charge(state_ids: List[str], charges: List[int]) -> Dict[int, List[str]]:
"""Make a dict with charge as key, and lists of species as values.
Parameters
----------
state_ids - identifiers for states
charges - charges for states
Returns
-------
Dict with charge as keys, and lists that contain the names of microstates with that charge.
"""
charge_dict = dict(zip(state_ids, charges))
species_dict = dict()
# Duplicates don't matter
for value in charge_dict.values():
species_dict[value] = list()
for state_id, charge in charge_dict.items():
species_dict[charge].append(state_id)
return species_dict
def macropka_from_micro_pka(
deprotonated_charge: int, species_dict: Dict[int, List[str]], typei_df: pd.DataFrame
) -> float:
"""Calculate the macropKa from a set of micropkas
According to Bochevarov, Watson and Greenwood, J. Chem. Theory Comput. 2016, 12, 6001-6019
DOI:10.1021/acs.jctc.6b00805
Parameters
----------
deprotonated_charge - charge of deprotonated species
species_dict - Dict[charge] = List[microstate names]
typei_df - typei pKa dataframe.
Returns
-------
The macro pKa for going from state with deprotonated charge -> deprotonated charge +1
"""
typei_df["Ka"] = np.power(10.0, -typei_df["pKa"])
k_macro = 0
for deprot in species_dict[deprotonated_charge]:
k_micro_inverted = 0
for prot in species_dict[deprotonated_charge + 1]:
for row_id, row in typei_df.loc[
(typei_df["Protonated"] == prot) & (typei_df["Deprotonated"] == deprot)
].iterrows():
k_micro_inverted += 1.0 / row["Ka"]
# If there were no equilibrium constants for this pair, dont add
if k_micro_inverted != 0:
k_macro += 1.0 / k_micro_inverted
if k_macro != 0:
return -np.log10(k_macro)
else:
print(
typei_df,
species_dict[deprotonated_charge],
species_dict[deprotonated_charge + 1],
)
raise ValueError("Could not generate a macropKa, is the graph disconnected?")
def bootstrap_pKa_dataframe(original_df: pd.DataFrame) -> pd.DataFrame:
"""Perform empirical bootstrap over rows for correlation analysis.
Works with type I and type III dataframes."""
size = original_df.shape[0]
rows = np.random.choice(np.arange(size), size=size)
return original_df.iloc[rows].copy()
def parametric_bootstrap_pka_dataframe(
original_df: pd.DataFrame, n_samples: int = 1, n_bootstrap=10000
) -> pd.DataFrame:
"""Perform a parametric bootstrap over pKa values using SEMS.
SEMS are converted to SD using n_samples.
"""
long_df = pd.concat([original_df] * n_bootstrap)
long_df["pKa"] = long_df.apply(
lambda row: np.random.normal(row.pKa, row.SEM * np.sqrt(n_samples)), axis=1
)
return np.array_split(long_df, n_bootstrap)
def bootstrap_rmse_r(df: pd.DataFrame, nsamples: int):
"""Perform a bootstrap correlation analysis for a pKa dataframe
Parameters
----------
df - the original pandas dataframe with pKa data.
nsamples - number of bootstrap samples to draw
"""
rmse_list = list()
rs_list = list()
for i in range(nsamples):
bootstrap_df = bootstrap_pKa_dataframe(df)
exp = bootstrap_df.Experimental
pred = bootstrap_df.Predicted
rmse_list.append(array_rmse(exp, pred))
rs_list.append(pearsonr(exp, pred)[0])
rmse_array = np.asarray(rmse_list)
rs_array = np.asarray(rs_list)
rmse = array_rmse(df.Experimental, df.Predicted)
rs = pearsonr(df.Experimental, df.Predicted)[0]
return (
BootstrapDistribution(rmse, rmse_array),
PearsonRBootstrapDistribution(rs, rs_array),
)
def get_experimental_pKa_data(
molecule_name: str,
datafile: str = os.path.join(data_dir, "SAMPL6_experimental_pkas.csv"),
) -> pd.DataFrame:
"""Retrieve experimental pKa values, and errors from the experimental csv file."""
df = pd.read_csv(datafile)
pKas = list()
sems = list()
# Should match only one row, but have to grab the first entry
mol_match = df[df["Molecule ID"] == molecule_name].iloc[0]
for x in range(1, 4):
pKas.append(mol_match[f"pKa{x} mean"])
sems.append(mol_match[f"pKa{x} SEM"])
pKas = np.asarray(pKas)
sems = np.asarray(sems)
mask = np.isnan(pKas)
pKas = pKas[~mask]
sems = sems[~mask]
new_df = pd.DataFrame.from_records(dict(pKa=pKas, SEM=sems))
new_df["Molecule"] = molecule_name
return new_df[["Molecule", "pKa", "SEM"]]
class TypeIPrediction(TitrationCurve):
"""Representation of a Type I (micropKa) prediction for SAMPL6"""
ph_range = np.linspace(0, 12, num=101)
def __init__(self):
super(TypeIPrediction, self).__init__()
self.pkas = None
self.sems = None
return
@classmethod
def from_id(
cls,
mol_id: str,
datafile: str,
header: int = 0,
drop_nodes: Optional[List[str]] = None,
):
"""Retrieve the titration curve for one molecule from typeI predicted micropKas.
Parameters
----------
mol_id - the SAMPL6 identifier for this molecule
datafile - source of the type I pKa values as a csv file
header - integer index for the header, set to None if no header
drop_nodes - drop these states from generating the graph.
"""
data = get_typei_pka_data(mol_id, datafile, header)
graph = create_graph_from_typei_df(data)
# Drop any requested nodes.
if drop_nodes is not None:
for node in drop_nodes:
graph.remove_node(node)
micropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_equilibrium_graph(graph, cls.ph_range)
# Store data for reference
instance.graph = graph
instance.pkas = micropKas
instance.sems = sems
instance._update_charges_from_file(sampl6_charges_file)
instance._pick_zero_charge_ref_state()
instance.dataframe = data
return instance
@classmethod
def bootstrap_from_id(
cls,
mol_id: str,
datafile: str,
n_samples: Optional[int] = 1,
n_bootstrap: Optional[int] = 100,
header: int = 0,
drop_nodes: Optional[List[str]] = None,
):
"""Retrieve the titration curve for one molecule from typeI predicted micropKas.
Parameters
----------
mol_id - the SAMPL6 identifier for this molecule
datafile - source of the type I pKa values as a csv file
header - integer index for the header, set to None if no header
drop_nodes - drop these states from generating the graph.
n_samples - the number of samples over which the SEM was determined
n_bootstrap - number of curves to return.
Returns
-------
original curve, list of bootstrap curves
"""
data = get_typei_pka_data(mol_id, datafile, header)
charges = pd.read_csv(sampl6_charges_file)
charge_dict = dict(zip(charges["Microstate ID"], charges["Charge"]))
graph = create_graph_from_typei_df(data)
# Drop any requested nodes.
if drop_nodes is not None:
for node in drop_nodes:
graph.remove_node(node)
instances: List[TitrationCurve] = list()
for bootstrap_sample in range(n_bootstrap):
bootstrap_copy = data.copy()
bootstrap_copy["pKa"] = data.apply(
lambda row: np.random.normal(
row["pKa"], row["SEM"] * np.sqrt(n_samples)
),
axis=1,
)
bootstrap_graph = create_graph_from_typei_df(bootstrap_copy)
# Drop any requested nodes.
if drop_nodes is not None:
for node in drop_nodes:
bootstrap_graph.remove_node(node)
new_instance = cls.from_equilibrium_graph(bootstrap_graph, cls.ph_range)
new_instance._update_charges_from_dict(charge_dict)
new_instance._pick_zero_charge_ref_state()
new_instance.dataframe = bootstrap_copy
instances.append(new_instance)
micropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_equilibrium_graph(graph, cls.ph_range)
# Store data for reference
instance.pkas = micropKas
instance.sems = sems
instance._update_charges_from_file(sampl6_charges_file)
instance._pick_zero_charge_ref_state()
instance.dataframe = data
return instance, instances
def to_macroscopic(
self, bootstrap_sem=False, n_bootstrap_sem=10000
) -> TitrationCurve:
"""Convert microscopic pKas to macroscopic and provide a macroscopic curve.
Parameters
----------
bootstrap_sem = if True, estimate SD for macroscopic pKa
"""
species_dict = species_by_charge(self.state_ids, self.charges)
macropkas = list()
for q in range(min(self.charges), max(self.charges)):
pka = macropka_from_micro_pka(
q, species_by_charge(self.state_ids, self.charges), self.dataframe
)
macropkas.append(pka)
sems = np.zeros_like(macropkas)
if bootstrap_sem:
data = np.empty([len(macropkas), n_bootstrap_sem])
new_dfs = parametric_bootstrap_pka_dataframe(
self.dataframe, n_bootstrap=n_bootstrap_sem
)
for n, new_df in enumerate(new_dfs):
newmacropkas = list()
for q in range(min(self.charges), max(self.charges)):
pka = macropka_from_micro_pka(
q, species_by_charge(self.state_ids, self.charges), new_df
)
newmacropkas.append(pka)
data[:, n] = newmacropkas[:]
sems = np.std(data, axis=1)
macropkas = np.asarray(macropkas)
new_curve = TitrationCurve.from_macro_pkas(macropkas, self.ph_values)
new_q = np.asarray(list(range(min(self.charges), max(self.charges) + 1)))
new_curve._override_charges(new_q)
new_curve.sems = sems
return new_curve
@classmethod
def to_macroscopic_bootstrap(cls, *args, **kwargs):
"""Generate macroscopic curves, and use bootstrap."""
instance, instances = cls.bootstrap_from_id(*args, **kwargs)
instance = instance.to_macroscopic(bootstrap_sem=True)
instances = [
TypeIPrediction.to_macroscopic(bootstrap_instance)
for bootstrap_instance in instances
]
return instance, instances
class TypeIIPrediction(TitrationCurve):
"""Representation of a Type II (microstate log population) prediction for SAMPL6"""
ph_range = np.linspace(0, 12, num=101)
def __init__(self):
super(TypeIIPrediction, self).__init__()
return
@classmethod
def from_id(cls, molecule_name: str, datafile: str, header=0):
"""Instantiate a titration curve for one molecule from Type II predicted log populations."""
data = get_typeii_logp_data(molecule_name, datafile, header=header)
state_ids = data["Microstate ID"]
log_pop = data.iloc[:, 1:-1].values
pop = np.exp(np.asarray(log_pop))
# normalize
pop /= np.sum(pop, axis=0)[None, :]
instance = cls.from_populations(
pop, cls.ph_range, np.zeros(len(state_ids), int), state_ids
)
# TODO remove states with infinite free energy/log_population in every case
instance._update_charges_from_file(sampl6_charges_file, charge_header=0)
instance._pick_zero_charge_ref_state()
instance.dataframe = data
return instance
class TypeIIIPrediction(TitrationCurve):
"""Representation of a Type III (macropKa) prediction for SAMPL6."""
ph_range = np.linspace(0, 12, num=101)
def __init__(self):
super(TypeIIIPrediction, self).__init__()
self.pkas = None
self.sems = None
return
@classmethod
def from_id(
cls,
mol_id: str,
datafile: str,
header: Optional[int] = 0,
charge_at_pH7: int = 0,
):
"""Retrieve the titration curve for one molecule from typeIII predicted macropKas.
Parameters
----------
mol_id - the identifier for the molecule, e.g. "SM01".
datafile - location to take type III data from.
header - index of the header line in the csv file.
charge_at_pH7 - charge of most populated species at pH 7, useful for Epik sequential predictions.
Notes
-----
Titration curves are defined over a pH range of 0-12, sampled at 101 evenly spaced points.
"""
data = get_typeiii_pka_data(mol_id, datafile, header)
macropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_macro_pkas(macropKas, cls.ph_range)
# pH 7 is at the middle of the 101 element array of pH values
# The species is selected by the index with max value in the array
argmax_at_ph7 = np.argmax(instance.populations[:, 50])
instance.charges -= instance.charges[argmax_at_ph7]
instance.charges += charge_at_pH7
instance.mean_charge = instance.charges @ instance.populations
# Store data for reference
instance.pkas = macropKas
instance.sems = sems
instance._pick_zero_charge_ref_state()
instance.dataframe = data
return instance
@classmethod
def bootstrap_from_id(
cls,
mol_id: str,
datafile: str,
n_samples: int = 1,
n_bootstrap: int = 100,
header: int = 0,
charge_at_pH7: int = 0,
):
"""
Retrieve the titration curve for one molecule from typeIII predicted macropKas.
Parameters
----------
mol_id - the identifier for the molecule, e.g. "SM01".
datafile - location to take type III data from.
n_bootstrap - default[500], number of curves to return.
n_samples - default[1] the number of samples over which the SEM was determined
header - index of the header line in the csv file.
charge_at_pH7 - charge of most populated species at pH 7, useful for Epik sequential predictions.
"""
data = get_typeiii_pka_data(mol_id, datafile, header)
# Regular curve, no bootstrap
macropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_macro_pkas(macropKas, cls.ph_range)
# Store data for reference
instance.pkas = macropKas
instance.sems = sems
# pH 7 is at the middle of the 101 element array of pH values
# The species is selected by the index with max value in the array
argmax_at_ph7 = np.argmax(instance.populations[:, 50])
instance.charges -= instance.charges[argmax_at_ph7]
instance.charges += charge_at_pH7
instance.mean_charge = instance.charges @ instance.populations
instance._pick_zero_charge_ref_state()
# Bootstrap pKa values
instances: List[TypeIIIPrediction] = list()
for bootstrap_sample in range(n_bootstrap):
pkas = data.apply(
lambda row: np.random.normal(
row["pKa"], row["SEM"] * np.sqrt(n_samples)
),
axis=1,
)
new_instance = cls.from_macro_pkas(np.asarray(pkas), cls.ph_range)
new_instance.charges = instance.charges
argmax_at_ph7 = np.argmax(new_instance.populations[:, 50])
new_instance.charges -= new_instance.charges[argmax_at_ph7]
new_instance.charges += charge_at_pH7
new_instance.mean_charge = new_instance.charges @ new_instance.populations
new_instance._pick_zero_charge_ref_state()
instances.append(new_instance)
return instance, instances
class SAMPL6Experiment(TitrationCurve):
"""Class to represent a Sirius T3 experimental titration curve from the SAMPL6 dataset."""
# Experiments by <NAME>, 2018
experimental_data_file = os.path.join(data_dir, "SAMPL6_experimental_pkas.csv")
ph_range = np.linspace(0, 12, num=101)
def __init__(self):
super(SAMPL6Experiment, self).__init__()
self.pkas = None
self.sems = None
@classmethod
def from_id(cls, mol_id: str, datafile: Optional[str] = None):
"""Retrieve the titration curve for one molecule from the experiment.
Parameters
----------
mol_id - the identifier for the molecule, e.g. "SM01".
datafile - optional, location to take experimental data from.
Uses the file "experimental_pkas.csv" by default.
Notes
-----
The experiments are defined over a pH range of 2-12.
"""
# Use built in file for convenience
if datafile is None:
datafile = cls.experimental_data_file
data = get_experimental_pKa_data(mol_id, datafile)
macropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_macro_pkas(macropKas, cls.ph_range)
# Store data for reference
instance.pkas = macropKas
instance.sems = sems
instance._pick_zero_charge_ref_state()
return instance
@classmethod
def bootstrap_from_id(
cls,
mol_id: str,
datafile: Optional[str] = None,
n_samples: Optional[int] = 3,
n_bootstrap: Optional[int] = 100,
):
"""Retrieve the titration curve for one molecule from the experiment.
Parameters
----------
mol_id - the identifier for the molecule, e.g. "SM01".
datafile - optional, location to take experimental data from.
Uses the file "experimental_pkas.csv" by default.
n_bootstrap - number of bootstrap samples to generate
n_samples - number of samples used to determine SEM (was three for the data set)
Notes
-----
The experiments are defined over a pH range of 2-12.
"""
# Use built in file for convenience
if datafile is None:
datafile = cls.experimental_data_file
data = get_experimental_pKa_data(mol_id, datafile)
instances = list()
for bootstrap_sample in range(n_bootstrap):
pkas = data.apply(
lambda row: np.random.normal(
row["pKa"], row["SEM"] * np.sqrt(n_samples)
),
axis=1,
)
instances.append(cls.from_macro_pkas(np.asarray(pkas), cls.ph_range))
# Store data for reference
macropKas = np.asarray(data["pKa"])
sems = np.asarray(data["SEM"])
instance = cls.from_macro_pkas(macropKas, cls.ph_range)
instance.pkas = macropKas
instance.sems = sems
instance._pick_zero_charge_ref_state()
return instance, instances
def add_unobserved_state(self):
"""Adds a new, unvisited state to the system.
Note
----
This hypothetical state can be useful for modeling purposes, as it provides a state to which any unmatched prediction can be assigned.
"""
# Assumed all states are at 0 population.
new_state_population = np.zeros(self.populations.shape[1], dtype=float)
self.populations = np.vstack((self.populations, new_state_population))
# Recalculate free energies for consistency.
# note: this takes the log of 0, which would emit a warning (suppressed below).
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.free_energies = free_energy_from_population(self.populations)
self.state_ids.append("Unobserved")
def bootstrap_comparison(
molecule: str,
prediction_file: str,
datatype: str,
n_samples=1,
n_bootstrap=1000,
**kwargs,
):
"""Perform a bootstrap analysis on the experimental and the computed titration curve.
Parameters
----------
molecule - SAMPL6 identifier of the molecule.
prediction_file - file name containing the computed pKa values.
datatype - typeI or typeIII (type II doesn't have error bars, so we can't bootstrap)
n_samples - number of samples used to determine the standard error.
n_bootstrap - number of bootstrap samples to draw.
"""
if datatype == "typeI":
predicted_curve, strapped_curves = TypeIPrediction.bootstrap_from_id(
molecule, prediction_file, n_samples, n_bootstrap, **kwargs
)
elif datatype == "typeIII":
predicted_curve, strapped_curves = TypeIIIPrediction.bootstrap_from_id(
molecule, prediction_file, n_samples, n_bootstrap, **kwargs
)
experimental_curve, exp_strapped_curves = SAMPL6Experiment.bootstrap_from_id(
molecule, n_bootstrap=n_bootstrap
)
df = | pd.DataFrame(columns=["Molecule", "Δ"]) | pandas.DataFrame |
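# Hedged continuation sketch (the original function is truncated above): a typical
# next step is to summarise each bootstrapped predicted/experimental pair. This
# assumes the curves expose a mean_charge array, as used elsewhere in this module,
# and reuses array_rmse(exp, pred); it is illustrative, not the author's code.
def _example_bootstrap_rmse_distribution(strapped_curves, exp_strapped_curves):
    rmses = [
        array_rmse(exp_curve.mean_charge, pred_curve.mean_charge)
        for pred_curve, exp_curve in zip(strapped_curves, exp_strapped_curves)
    ]
    return np.asarray(rmses)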
# <NAME> & LYDIA SCHWEITZER Assignment 3
# Yelp Data visualization using Streamlit
# code referenced from demo-uper-nyc-pickups
# https://github.com/streamlit/demo-uber-nyc-pickups/blob/master/streamlit_app.py
# IMPORTS **********************************************************************
import streamlit as st
import copy
import pandas as pd
import numpy as np
import altair as alt
import pydeck as pdk
import warnings
warnings.filterwarnings('ignore')
# SETTING PAGE CONFIG TO WIDE MODE
st.beta_set_page_config(layout="wide")
################################################################################
# DATA SETUP
################################################################################
bFile = 'business.csv' # business file
rFile, rDateCol = 'reviewCity.csv', 'date' # reviews file
#uFile, uDateCol = 'user.csv', 'yelping_since' # user file
#dataFile = bFile
@st.cache()
def load_data(dataFile, dateCol = None, nrows = 400000):
data = pd.read_csv(dataFile, nrows = nrows)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis="columns", inplace=True)
if dateCol in data.columns:
data[dateCol] = | pd.to_datetime(data[dateCol]) | pandas.to_datetime |
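# Hedged usage sketch (illustrative): how the cached loader above could feed a
# simple Streamlit view. The 'latitude'/'longitude' column names are assumptions
# about business.csv, not guaranteed by this script.
def _example_business_map():
    business_data = load_data(bFile)
    st.title("Yelp businesses")
    st.map(business_data[["latitude", "longitude"]].dropna())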
import logging
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from math import pi
from wordcloud import (WordCloud, get_single_color_func)
import numpy as np
from PIL import Image
import squarify
import os
logger = logging.getLogger('nodes.data_viz')
class SimpleGroupedColorFunc(object):
def __init__(self, color_to_words, default_color):
self.word_to_color = {word: color
for (color, words) in color_to_words.items()
for word in words}
self.default_color = default_color
def __call__(self, word, **kwargs):
return self.word_to_color.get(word, self.default_color)
class GroupedColorFunc(object):
def __init__(self, color_to_words, default_color):
self.color_func_to_words = [
(get_single_color_func(color), set(words))
for (color, words) in color_to_words.items()]
self.default_color_func = get_single_color_func(default_color)
def get_color_func(self, word):
try:
color_func = next(
color_func for (color_func, words) in self.color_func_to_words
if word in words)
except StopIteration:
color_func = self.default_color_func
return color_func
def __call__(self, word, **kwargs):
return self.get_color_func(word)(word, **kwargs)
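# Hedged usage sketch for the colour functions above. The colour mapping and text
# are placeholders; recolor() re-paints an already generated cloud using the
# grouped colour function.
def _example_grouped_recolor(text="data viz deck card error error"):
    color_to_words = {"#00b300": ["data", "viz"], "#e60000": ["error"]}
    wc = WordCloud(collocations=False).generate(text)
    wc.recolor(color_func=GroupedColorFunc(color_to_words, default_color="grey"))
    return wc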
def plot_decks_colors(client):
query = '''select 1 as uno, d.id as id,
case when string_agg(c.color_identity, '') like '%W%' then 1 else 0 end as white,
case when string_agg(c.color_identity, '') like '%U%' then 1 else 0 end as blue,
case when string_agg(c.color_identity, '') like '%B%' then 1 else 0 end as black,
case when string_agg(c.color_identity, '') like '%R%' then 1 else 0 end as red,
case when string_agg(c.color_identity, '') like '%G%' then 1 else 0 end as green
from deck as d,card as c, deck_card as dc
where dc.deck_id = d.id
and dc.card_id = c.uuid
group by d.id;
'''
decks_colors = | pd.read_sql_query(query, client.engine) | pandas.read_sql_query |
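# Hedged follow-up sketch: once decks_colors is loaded, per-colour deck counts can
# be summed directly from the indicator columns defined in the query above.
def _example_color_counts(decks_colors):
    return decks_colors[["white", "blue", "black", "red", "green"]].sum()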
import pandas as pd
import requests
from bs4 import BeautifulSoup
import numpy as np
from time import sleep
website = lambda start, end: [f"https://en.wikipedia.org/wiki/UFC_{i}" for i in range(start, end)]
def get_top_level_data(end, start=20, get_fight_card_stats=False, both=True, avg_pause=.6):
"""
@param end: integer, last UFC event number to scrape (exclusive)
@param start: integer, first UFC event number to scrape
@param get_fight_card_stats: boolean, if True also collect fight-card level tables
@param both: boolean, if True collect both top-level and fight-card tables
@param avg_pause: float, mean pause (in seconds) between page requests
"""
def add_d_set(df_use, name):
df = df_use.copy()
df.insert(0, "Event", f"UFC{name}")
return df
vals = website(start, end)
rets_df = []
if both:
rets_df_2 = []
for j, pause in zip(vals, np.random.poisson(avg_pause, len(vals))):
print(j)
if get_fight_card_stats and not both:
rets_df.append(add_d_set( | pd.read_html(j) | pandas.read_html |
from typing import List, Optional
import numpy as np
import pandas as pd
from pandas import Series
from snorkel.labeling.model import LabelModel
from bohr.config.pathconfig import PathConfig
from bohr.datamodel.dataset import Dataset
from bohr.datamodel.task import Task
def label_dataset(
task: Task,
dataset: Dataset,
path_config: Optional[PathConfig] = None,
debug: bool = False,
):
path_config = path_config or PathConfig.load()
applied_heuristics_df = pd.read_pickle(
str(path_config.generated / task.name / f"heuristic_matrix_{dataset.name}.pkl")
)
label_model = LabelModel()
label_model.load(str(path_config.generated / task.name / "label_model.pkl"))
df = dataset.load()
df_labeled = do_labeling(
label_model, applied_heuristics_df.to_numpy(), df, task.labels
)
if debug:
for (
heuristic_name,
applied_heuristic_series,
) in applied_heuristics_df.iteritems():
applied_heuristics_df[heuristic_name] = applied_heuristic_series.map(
{0: heuristic_name, 1: heuristic_name, -1: ""}
)
col_lfs = applied_heuristics_df.apply(
lambda row: ";".join([elm for elm in row if elm]), axis=1
)
df_labeled["lfs"] = col_lfs
labeled_data_path = path_config.labeled_data / task.name
if not labeled_data_path.exists():
labeled_data_path.mkdir(parents=True)
target_file = labeled_data_path / f"{dataset.name}.labeled.csv"
df_labeled.to_csv(target_file, index=False)
print(f"Labeled dataset has been written to {target_file}.")
def do_labeling(
label_model: LabelModel,
matrix: np.ndarray,
df: pd.DataFrame,
label_names: List[str],
) -> pd.DataFrame:
labels, probs = label_model.predict(L=matrix, return_probs=True)
probs = np.around(probs, decimals=2)
df_labeled = df.assign(predicted=Series(labels))
df_labeled[f"prob_{label_names[0]}"] = | Series(probs[:, 0]) | pandas.Series |
'''Python script to generate Revenue Analysis given ARR by Customer'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class RevAnalysis:
def __init__(self, json):
print("INIT REV ANALYSIS")
self.arr = pd.DataFrame(json)
self.years = []
self.rev_brackets = {}
self.cust_brackets = {}
def run(self):
self.clean_inputs()
print(self.arr)
self.mrr_by_customer()
self.rev_cohorts()
self.cy_ttm_revenue()
self.revenue_brackets("CY", "TTM")
self.customer_brackets("CY", "TTM")
self.revenue_brackets("ARR", "ARR*")
self.customer_brackets("ARR", "ARR*")
self.clean_outputs()
json = {
"MRR by Customer": self.mrr.to_dict(orient='records'),
"Revenue Cohorts (Monthly)": self.rev_cohorts.to_dict(orient='records'),
"Revenue Calculations": self.cy_ttm_revenue.to_dict(orient='records'),
"Revenue Brackets (CY, TTM)": self.rev_brackets["CY"].to_dict(orient='records'),
"Customer Brackets (CY, TTM)": self.cust_brackets["CY"].to_dict(orient='records'),
"Revenue Brackets (ARR)": self.rev_brackets["ARR"].to_dict(orient='records'),
"Customer Brackets (ARR)": self.cust_brackets["ARR"].to_dict(orient='records')
}
return json
def clean_inputs(self):
self.arr.set_index("Customer", inplace=True)
self.arr.apply(filter_to_dec_list)
def clean_outputs(self):
self.mrr = self.mrr.astype(object)
self.mrr.apply(zero_to_blank_list)
self.mrr.apply(dec_to_dollars_list)
self.mrr.reset_index(inplace=True)
self.rev_cohorts = self.rev_cohorts.astype(object)
self.rev_cohorts.iloc[:, 1:-1] = self.rev_cohorts.iloc[:, 1:-1].apply(zero_to_blank_list)
self.rev_cohorts.iloc[:, 1:-1] = self.rev_cohorts.iloc[:, 1:-1].apply(dec_to_dollars_list)
self.rev_cohorts.reset_index(inplace=True)
cy = [col for col in self.cy_ttm_revenue.columns if "CY" in col and "YOY" not in col]
ttm = [col for col in self.cy_ttm_revenue.columns if "TTM" in col]
yoy = [col for col in self.cy_ttm_revenue.columns if "YOY" in col]
yoy_indices = [i for i in range(self.cy_ttm_revenue.shape[1]) if "YOY" in self.cy_ttm_revenue.columns[i] or "Total ARR" in self.cy_ttm_revenue.columns[i]]
not_yoy_indices = list(set(range(self.cy_ttm_revenue.shape[1])) - set(yoy_indices))
arr = [col for col in self.cy_ttm_revenue.columns if "ARR" in col]
self.cy_ttm_revenue = self.cy_ttm_revenue.astype(object)
self.cy_ttm_revenue.apply(zero_to_blank_list)
self.cy_ttm_revenue.iloc[:, not_yoy_indices] = self.cy_ttm_revenue.iloc[:, not_yoy_indices].apply(dec_to_dollars_list)
self.cy_ttm_revenue.iloc[:, yoy_indices] = self.cy_ttm_revenue.iloc[:, yoy_indices].apply(dec_to_percents_list)
self.cy_ttm_revenue.sort_values(self.cy_ttm_revenue.columns[-1])
self.cy_ttm_revenue = self.cy_ttm_revenue.reindex(cy + ttm + yoy + arr, axis=1)
self.cy_ttm_revenue.reset_index(inplace=True)
self.clean_brackets_outputs("CY", "TTM")
self.clean_brackets_outputs("ARR", "ARR*")
print("MRR BY CUSTOMER")
print(self.mrr)
print("REVENUE COHORTS")
print(self.rev_cohorts)
print("CY TTM ARR")
print(self.cy_ttm_revenue)
print("CY TTM BRACKETS")
print(self.rev_brackets["CY"])
print("REVENUE CUSTOMER BRACKETS")
print(self.cust_brackets["CY"])
print("ARR BRACKETS")
print(self.rev_brackets["ARR"])
print("ARR CUSTOMER BRACKETS")
print(self.cust_brackets["ARR"])
def clean_brackets_outputs(self, type, not_type):
cy_only = [col for col in self.rev_brackets[type].columns if type in col and not_type not in col and "% Rev" not in col]
cy_rev = [col for col in self.rev_brackets[type].columns if "% Rev" in col and not_type not in col]
new_cy = [j for i in zip(cy_only,cy_rev) for j in i]
ttm_all = [col for col in self.rev_brackets[type].columns if not_type in col]
rev_indices = [i for i in range(self.rev_brackets[type].shape[1]) if "% Rev" in self.rev_brackets[type].columns[i]]
not_rev_indices = list(set(range(self.rev_brackets[type].shape[1])) - set(rev_indices))
self.rev_brackets[type] = self.rev_brackets[type].astype(object)
self.rev_brackets[type].iloc[:, not_rev_indices] = self.rev_brackets[type].iloc[:, not_rev_indices].apply(numbers_with_commas_list)
self.rev_brackets[type].iloc[:, rev_indices] = self.rev_brackets[type].iloc[:, rev_indices].apply(dec_to_percents_list)
self.rev_brackets[type] = self.rev_brackets[type].reindex(new_cy + ttm_all, axis=1)
self.rev_brackets[type].index = self.rev_brackets[type].index.map(dec_to_dollars)
self.rev_brackets[type].reset_index(inplace=True)
self.cust_brackets[type] = self.cust_brackets[type].astype(object)
self.cust_brackets[type].apply(numbers_with_commas_list)
self.cust_brackets[type].index = self.cust_brackets[type].index.map(dec_to_dollars)
cust_brackets_index = self.cust_brackets[type].index
index_labels_dict = {cust_brackets_index[i]: str(cust_brackets_index[i])+"-"+str(cust_brackets_index[i+1]) for i in range(len(cust_brackets_index)-1)}
index_labels_dict[cust_brackets_index[-1]] = str(cust_brackets_index[-1])+'+'
self.cust_brackets[type].rename(index=index_labels_dict, inplace=True)
self.cust_brackets[type].reset_index(inplace=True)
def mrr_by_customer(self):
self.mrr = self.arr.copy()/12
self.mrr.loc["Grand Total"] = self.mrr.sum()
self.mrr.loc["ARR"] = (self.mrr.loc["Grand Total"]*12).iloc[0]
# Only keep the last 3 years
self.years = pd.to_datetime(self.mrr.columns).strftime('%Y')
counter = collections.Counter(self.years)
num_trailing_months = counter[max(counter.keys())]
del counter[max(counter.keys())]
last_index = min(36, 12*len(counter.keys())) + num_trailing_months
self.mrr = self.mrr.iloc[:, -last_index:]
self.years = pd.to_datetime(self.mrr.columns).strftime('%Y')
def rev_cohorts(self):
first_rev = np.argmax(self.mrr.values!=0.0,axis=1)
last_rev = self.mrr.shape[1] - np.argmax(self.mrr.iloc[:, ::-1].values!=0.0,axis=1) - 1
self.rev_cohorts = pd.DataFrame(index=np.arange(self.mrr.shape[0]))
self.rev_cohorts.set_index(self.mrr.index, inplace=True)
self.rev_cohorts['Cohort'] = self.mrr.columns[first_rev]
self.rev_cohorts['Cohort'] = pd.to_datetime(self.rev_cohorts['Cohort']).dt.strftime('%m/%Y')
self.rev_cohorts['Initial Rev'] = [self.mrr.iloc[i][first_rev[i]] for i in range(len(first_rev))]
self.rev_cohorts['End Rev'] = [self.mrr.iloc[i][last_rev[i]] for i in range(len(last_rev))]
self.rev_cohorts['End'] = self.mrr.columns[last_rev]
self.rev_cohorts['End'] = pd.to_datetime(self.rev_cohorts['End']).dt.strftime('%m/%Y')
self.rev_cohorts = self.rev_cohorts.apply(lambda x: ["N/A"]+list(x.iloc[1:-1])+["N/A"] if (x['Initial Rev']==0 and x['End Rev']==0) else x, axis=1)
self.rev_cohorts.drop(self.rev_cohorts.tail(2).index, inplace=True)
def cy_ttm_revenue(self):
self.cy_ttm_revenue = pd.DataFrame(index=np.arange(self.mrr.shape[0]))
self.cy_ttm_revenue.set_index(self.mrr.index, inplace=True)
counter = collections.Counter(self.years)
for year in counter.keys():
if counter[year] == 12:
# Calculate CY for each full year
current_year_indices = [i for i in range(len(self.years)) if self.years[i] == year]
current_year_columns = self.mrr.iloc[:,current_year_indices]
self.cy_ttm_revenue["CY "+year] = current_year_columns.sum(axis=1)
# Calculate ARRs for the last month of each full year
self.cy_ttm_revenue["12/"+year+" ARR"] = current_year_columns.iloc[:, -1:]*12
# Calculate TTM for the last month
mrr_ttm = self.mrr.iloc[:, -12:]
self.cy_ttm_revenue["TTM "+ | pd.to_datetime(mrr_ttm.columns[-1]) | pandas.to_datetime |
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_kp.csv')
# Prepping data for pollution regression
# Data sets for individual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag3', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ch4_intensity_lag3']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_nox_intensity_lag']].dropna()
ghg_data = data[['ln_ghg', 'ln_ghg_lag', 'ln_sk', 'ln_n5', 'ln_ghg_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ghg_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = pd.get_dummies(nox_data['Year'])
ghg_national_dummies = | pd.get_dummies(ghg_data['Country']) | pandas.get_dummies |
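# Hedged sketch (not from the original script): the dummies above are typically
# concatenated with the regressors to estimate a two-way fixed-effects OLS. Column
# choices here are illustrative; only the mechanical step is shown.
def _example_fixed_effects_ols(y, X, national_dummies, year_dummies):
    # drop one dummy per group to avoid the dummy-variable trap
    design = pd.concat([X, national_dummies.iloc[:, 1:], year_dummies.iloc[:, 1:]], axis=1)
    design = stats.add_constant(design)
    return stats.OLS(y, design).fit()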
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.dataframe.datasource.dataframe import from_pandas
from mars.dataframe.datasource.series import from_pandas as series_from_pandas
from mars.dataframe.merge import concat
from mars.dataframe.utils import sort_dataframe_inplace
def test_merge(setup):
df1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
df2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df3 = df1.copy()
df3.index = pd.RangeIndex(2, 6, name='index')
df4 = df1.copy()
df4.index = pd.MultiIndex.from_tuples([(i, i + 1) for i in range(4)], names=['i1', 'i2'])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
mdf3 = from_pandas(df3, chunk_size=3)
mdf4 = from_pandas(df4, chunk_size=2)
# Note [Index of Merge]
#
# When `left_index` and `right_index` of `merge` are both false, pandas generates a RangeIndex for
# the final result dataframe.
#
# We chunked the `left` and `right` dataframes, thus every result chunk has its own RangeIndex.
# When they are concatenated we don't generate a new RangeIndex for the result, so we cannot obtain the
# same index values as pandas. But we guarantee that the content of the dataframe is correct.
# merge on index
expected0 = df1.merge(df2)
jdf0 = mdf1.merge(mdf2)
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
# merge on left index and `right_on`
expected1 = df1.merge(df2, how='left', right_on='x', left_index=True)
jdf1 = mdf1.merge(mdf2, how='left', right_on='x', left_index=True)
result1 = jdf1.execute().fetch()
expected1.set_index('a_x', inplace=True)
result1.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1, 0), sort_dataframe_inplace(result1, 0))
# merge on `left_on` and right index
expected2 = df1.merge(df2, how='right', left_on='a', right_index=True)
jdf2 = mdf1.merge(mdf2, how='right', left_on='a', right_index=True)
result2 = jdf2.execute().fetch()
expected2.set_index('a', inplace=True)
result2.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
# merge on `left_on` and `right_on`
expected3 = df1.merge(df2, how='left', left_on='a', right_on='x')
jdf3 = mdf1.merge(mdf2, how='left', left_on='a', right_on='x')
result3 = jdf3.execute().fetch()
expected3.set_index('a_x', inplace=True)
result3.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
# merge on `on`
expected4 = df1.merge(df2, how='right', on='a')
jdf4 = mdf1.merge(mdf2, how='right', on='a')
result4 = jdf4.execute().fetch()
expected4.set_index('a', inplace=True)
result4.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
# merge on multiple columns
expected5 = df1.merge(df2, how='inner', on=['a', 'b'])
jdf5 = mdf1.merge(mdf2, how='inner', on=['a', 'b'])
result5 = jdf5.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected5, 0), sort_dataframe_inplace(result5, 0))
# merge when some on is index
expected6 = df3.merge(df2, how='inner', left_on='index', right_on='a')
jdf6 = mdf3.merge(mdf2, how='inner', left_on='index', right_on='a')
result6 = jdf6.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected6, 0), sort_dataframe_inplace(result6, 0))
# merge when on is in MultiIndex
expected7 = df4.merge(df2, how='inner', left_on='i1', right_on='a')
jdf7 = mdf4.merge(mdf2, how='inner', left_on='i1', right_on='a')
result7 = jdf7.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected7, 0), sort_dataframe_inplace(result7, 0))
# merge when on is in MultiIndex, and on not in index
expected8 = df4.merge(df2, how='inner', on=['a', 'b'])
jdf8 = mdf4.merge(mdf2, how='inner', on=['a', 'b'])
result8 = jdf8.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected8, 0), sort_dataframe_inplace(result8, 0))
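# Hedged aside (not one of the test cases): the comparison pattern used throughout
# this file. Because chunked execution does not reproduce pandas' row order or
# RangeIndex (see Note [Index of Merge]), frames are compared order-insensitively
# by sorting on a key column first.
def _example_order_insensitive_compare(expected, result, key_column):
    pd.testing.assert_frame_equal(
        expected.sort_values(key_column).reset_index(drop=True),
        result.sort_values(key_column).reset_index(drop=True),
    )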
def test_join(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], index=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
# default `how`
expected0 = df1.join(df2, lsuffix='l_', rsuffix='r_')
jdf0 = mdf1.join(mdf2, lsuffix='l_', rsuffix='r_')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(expected0.sort_index(), result0.sort_index())
# how = 'left'
expected1 = df1.join(df2, how='left', lsuffix='l_', rsuffix='r_')
jdf1 = mdf1.join(mdf2, how='left', lsuffix='l_', rsuffix='r_')
result1 = jdf1.execute().fetch()
pd.testing.assert_frame_equal(expected1.sort_index(), result1.sort_index())
# how = 'right'
expected2 = df1.join(df2, how='right', lsuffix='l_', rsuffix='r_')
jdf2 = mdf1.join(mdf2, how='right', lsuffix='l_', rsuffix='r_')
result2 = jdf2.execute().fetch()
pd.testing.assert_frame_equal(expected2.sort_index(), result2.sort_index())
# how = 'inner'
expected3 = df1.join(df2, how='inner', lsuffix='l_', rsuffix='r_')
jdf3 = mdf1.join(mdf2, how='inner', lsuffix='l_', rsuffix='r_')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(expected3.sort_index(), result3.sort_index())
# how = 'outer'
expected4 = df1.join(df2, how='outer', lsuffix='l_', rsuffix='r_')
jdf4 = mdf1.join(mdf2, how='outer', lsuffix='l_', rsuffix='r_')
result4 = jdf4.execute().fetch()
pd.testing.assert_frame_equal(expected4.sort_index(), result4.sort_index())
def test_join_on(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], columns=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], columns=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
expected0 = df1.join(df2, on=None, lsuffix='_l', rsuffix='_r')
jdf0 = mdf1.join(mdf2, on=None, lsuffix='_l', rsuffix='_r')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
expected1 = df1.join(df2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
jdf1 = mdf1.join(mdf2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
result1 = jdf1.execute().fetch()
# Note [Columns of Left Join]
#
# I believe we have no chance to obtain the entirely same result with pandas here:
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 0 1 3 3
# >>> df2
# a1 b2 b3
# 1 2 6 7
# >>> df3
# a1 b2 b3
# 1 2 6 7
# 1 2 6 7
#
# >>> df1.merge(df2, how='left', left_on='a1', left_index=False, right_index=True)
# a1_x a2 a3 a1_y b2 b3
# 0 1 3 3 2 6 7
# >>> df1.merge(df3, how='left', left_on='a1', left_index=False, right_index=True)
# a1 a1_x a2 a3 a1_y b2 b3
# 0 1 1 3 3 2 6 7
# 0 1 1 3 3 2 6 7
#
# Note that the result of `df1.merge(df3)` has an extra column `a` compared to `df1.merge(df2)`.
# The value of column `a` is the same of `a1_x`, just because `1` occurs twice in index of `df3`.
# I haven't invistagated why pandas has such behaviour...
#
# We cannot yield the same result with pandas, because, the `df3` is chunked, then some of the
# result chunk has 6 columns, others may have 7 columns, when concatenated into one DataFrame
# some cells of column `a` will have value `NaN`, which is different from the result of pandas.
#
# But we can guarantee that other effective columns have absolutely same value with pandas.
columns_to_compare = jdf1.columns_value.to_pandas()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1[columns_to_compare], 0, 1),
sort_dataframe_inplace(result1[columns_to_compare], 0, 1))
# Note [Index of Join on EmptyDataFrame]
#
# It is non-trivial to get the same `index` result as pandas.
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 1 4 2 6
# >>> df2
# a1 b2 b3
# 1 2 6 7
# 2 8 9 10
# >>> df3
# Empty DataFrame
# Columns: [a1, a2, a3]
# Index: []
# >>> df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1.0 4.0 2 6.0 8 9 10
# NaN NaN 1 NaN 2 6 7
# >>> df3.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1 NaN 1 NaN 2 6 7
# 2 NaN 2 NaN 8 9 10
#
# When the `left` dataframe is not empty, the mismatched rows in `right` will have index value `NaN`,
# and the matched rows have index value from `right`. When the `left` dataframe is empty, the mismatched
# rows have index value from `right`.
#
# Since we chunked the `left` dataframe, it is hard to obtain the same index values as pandas in the
# final result dataframe, but we guarantee that the dataframe content is correct.
expected2 = df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
jdf2 = mdf1.join(mdf2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
result2 = jdf2.execute().fetch()
expected2.set_index('a2', inplace=True)
result2.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
expected3 = df1.join(df2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
jdf3 = mdf1.join(mdf2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
expected4 = df1.join(df2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
jdf4 = mdf1.join(mdf2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
result4 = jdf4.execute().fetch()
expected4.set_index('a2', inplace=True)
result4.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
def test_merge_one_chunk(setup):
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]}, index=['a1', 'a2', 'a3', 'a4'])
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
# all have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# left have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2, chunk_size=2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# right have one chunk
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
def test_merge_on_duplicate_columns(setup):
raw1 = pd.DataFrame([['foo', 1, 'bar'],
['bar', 2, 'foo'],
['baz', 3, 'foo']],
columns=['lkey', 'value', 'value'],
index=['a1', 'a2', 'a3'])
raw2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
df1 = from_pandas(raw1, chunk_size=2)
df2 = from_pandas(raw2, chunk_size=3)
r = df1.merge(df2, left_on='lkey', right_on='rkey')
result = r.execute().fetch()
expected = raw1.merge(raw2, left_on='lkey', right_on='rkey')
pd.testing.assert_frame_equal(expected, result)
def test_append_execution(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=2)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
df3 = pd.DataFrame(np.random.rand(8, 4), columns=list('ABCD'))
mdf3 = from_pandas(df3, chunk_size=3)
expected = df1.append([df2, df3])
adf = mdf1.append([mdf2, mdf3])
result = adf.execute().fetch()
| pd.testing.assert_frame_equal(expected, result) | pandas.testing.assert_frame_equal |
from datetime import datetime, timedelta
import pandas as pd
from driver_repo import driver, driver_stats_fv
from feast import FeatureStore
def main():
pd.set_option("display.max_columns", None)
| pd.set_option("display.width", 1000) | pandas.set_option |
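# Hedged continuation sketch (the original main() is truncated above): in a typical
# Feast demo repo the next steps look roughly like this. The feature reference
# string and entity columns follow the standard driver-stats example and are
# assumptions, not values taken from this file.
def _example_get_training_df():
    store = FeatureStore(repo_path=".")
    entity_df = pd.DataFrame(
        {
            "driver_id": [1001, 1002],
            "event_timestamp": [datetime.now() - timedelta(days=1), datetime.now()],
        }
    )
    return store.get_historical_features(
        entity_df=entity_df,
        features=["driver_hourly_stats:conv_rate"],
    ).to_df()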
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
| assert_series_equal(result, s) | pandas.util.testing.assert_series_equal |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to an integer number of contracts, so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non-existent contract holdings result in the fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
cols = pd.Index(["CO1"], name="generic")
weights2 = pd.DataFrame(1, index=widx, columns=cols)
wts_exp = {"CL": weights1, "CO": weights2}
assert_dict_of_frames(wts, wts_exp)
def test_reindex():
# related to https://github.com/matthewgilbert/mapping/issues/11
# no op
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
prices = pd.Series([103, 101, 102, 100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
exp_prices = prices
assert_series_equal(exp_prices, new_prices)
# missing front prices error
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
prices = pd.Series([100], index=idx)
widx = pd.MultiIndex.from_tuples([( | TS('2015-01-03') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# # Attrition Rate Analytics
# Customer Attrition is a tendency of customers to abandon a brand and stop being a paying client of a particular business. The percentage of customers that discontinue using a company’s products or services during a particular time period is called Customer Attrition Rate.
#
# The objective of this project is to analyze Customer Attrition Rate of a Telecom company using Machine-Learning. We will build our model using Train dataset and make our predictions for each customer in Test dataset.
# ## Importing Relevant Libraries
# In[1]:
#Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# In[2]:
#Import Train and Test dataset
train_df = pd.read_csv('Train_Data.csv')
test_df = pd.read_csv('Test_Data.csv')
# In[3]:
train_df.head()
# In[4]:
test_df.head()
# ## Data Inspection and Cleaning
# In[5]:
#Size of Train dataset
train_df.shape
# In[6]:
#Size of Test dataset
test_df.shape
# In[7]:
#Having a look at the Train data types
train_df.info()
# There are 20 features (Independent variables) and 1 target (Dependent variable) for 4224 customers in the Train dataset. The target variable indicates whether a customer has left the company (i.e. 'Churn' = Yes) within the last month. Since the target variable has two states (Yes/No or 1/0), this is a binary classification problem.
# In[8]:
#Having a look at the Test data types
test_df.info()
# As expected, there are 20 features (Independent variables) and we have to predict the target (Dependent variable) for 2819 customers in Test dataset.
# In[9]:
#NULL Checking in Train data
train_df.isnull().sum()
# 7 Null values found under 'TotalCharges' column in Train dataset.
# In[10]:
#NULL Checking in Test data
test_df.isnull().sum()
# 4 Null values found under 'TotalCharges' column in Test dataset.
# In[11]:
#Imputing with Mean
train_df['TotalCharges'] = train_df['TotalCharges'].fillna(train_df['TotalCharges'].mean())
test_df['TotalCharges'] = test_df['TotalCharges'].fillna(test_df['TotalCharges'].mean())
# In[12]:
train_df['TotalCharges'].isnull().sum()
# In[13]:
test_df['TotalCharges'].isnull().sum()
# Null values filled in both Train and Test dataset with mean value of 'TotalCharges'.
# ## Exploratory Data Analysis
# At first glance, only 'customerID' seems irrelevant to Attrition Rate. The other variables may or may not have an effect on Attrition Rate; we will find out.
# Target Variable 'Churn' signifies Attrition Rate.
# Let us now perform EDA on Train dataset.
# In[14]:
#Get the number of customers that churned
train_df['Churn'].value_counts()
# In[15]:
#Percentage of customers leaving
retained = train_df[train_df.Churn == 'No']
churned = train_df[train_df.Churn == 'Yes']
num_retained = retained.shape[0]
num_churned = churned.shape[0]
#Percentage of customers that Stayed with the company
print( num_retained / (num_retained + num_churned) * 100 , "% of customers Stayed with the company.")
#Percentage of customers that left the company
print( num_churned / (num_retained + num_churned) * 100,"% of customers Left the company.")
# The target variable has an imbalanced class distribution: the positive class ('Churn' = Yes) is much smaller than the negative class ('Churn' = No). Imbalanced class distributions hurt the performance of a Machine Learning model. We will use upsampling or downsampling to overcome this issue, as sketched below.
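# A minimal, hypothetical sketch of the resampling idea mentioned above (not part
# of the original notebook): upsample the minority class with sklearn.utils.resample
# so both classes have the same number of rows. It assumes scikit-learn is available,
# uses train_df as loaded above (with 'Churn' still holding 'Yes'/'No'), and the
# random_state value is arbitrary.
from sklearn.utils import resample
majority = train_df[train_df.Churn == 'No']
minority = train_df[train_df.Churn == 'Yes']
minority_upsampled = resample(minority,
                              replace=True,             # sample with replacement
                              n_samples=len(majority),  # match the majority class size
                              random_state=42)
train_balanced = pd.concat([majority, minority_upsampled])
print(train_balanced.Churn.value_counts())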
# It is always beneficial to explore the features (Independent variables) before trying to build a model. Let's first discover the features that only have two values.
# In[16]:
columns = train_df.columns
binary_cols = []
for col in columns:
if train_df[col].value_counts().shape[0] == 2:
binary_cols.append(col)
# In[17]:
#Categorical features with two classes
binary_cols # categorical features with two classes
# The remaining categorical variables have more than two values (or classes).
# In[18]:
#Categorical features with multiple classes
multiple_cols_cat = ['MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup',
'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract','PaymentMethod']
# ### Binary Categorical Features
# In[19]:
#Checking the class distribution of binary features
fig, axes = plt.subplots(2, 3, figsize=(12, 7), sharey=True)
sns.countplot("gender", data=train_df, ax=axes[0,0])
sns.countplot("SeniorCitizen", data=train_df, ax=axes[0,1])
sns.countplot("Partner", data=train_df, ax=axes[0,2])
sns.countplot("Dependents", data=train_df, ax=axes[1,0])
sns.countplot("PhoneService", data=train_df, ax=axes[1,1])
sns.countplot("PaperlessBilling", data=train_df, ax=axes[1,2])
# There is a high imbalance in the 'SeniorCitizen' and 'PhoneService' variables: most customers are not senior citizens and, similarly, most customers have a Phone Service.
# It is better to check how the target variable ('Churn') changes according to the binary features. To be able to make these calculations, we need to change the values of the target variable: 'Yes' becomes 1 and 'No' becomes 0.
# In[20]:
churn_numeric = {'Yes':1, 'No':0}
train_df.Churn.replace(churn_numeric, inplace=True)
# In[21]:
train_df[['gender','Churn']].groupby(['gender']).mean()
# The average Attrition Rate for Males and Females is approximately the same, which indicates that the 'gender' variable does not add valuable predictive power. Therefore, we will not use the 'gender' variable in the Machine Learning model.
# In[22]:
train_df[['SeniorCitizen','Churn']].groupby(['SeniorCitizen']).mean()
# In[23]:
train_df[['Partner','Churn']].groupby(['Partner']).mean()
# In[24]:
train_df[['Dependents','Churn']].groupby(['Dependents']).mean()
# In[25]:
train_df[['PhoneService','Churn']].groupby(['PhoneService']).mean()
# In[26]:
train_df[['PaperlessBilling','Churn']].groupby(['PaperlessBilling']).mean()
# The other binary features have an effect on the target variable. 'PhoneService' may also be skipped if you think the 2% difference can be ignored.
# ### Other Categorical Features
# It is time to explore other categorical features. We also have continuous features such as 'tenure', 'MonthlyCharges' and 'TotalCharges' which we will discuss in the next part.
#
# There are 6 variables that come with 'InternetService'. These variables come into play only if the customer has 'InternetService'.
# ### InternetService
# In[27]:
sns.countplot("InternetService", data=train_df)
# In[28]:
train_df[['InternetService','Churn']].groupby('InternetService').mean()
# The 'InternetService' variable is definitely important in predicting Attrition Rate. As you can see, customers with 'Fiber optic' Internet Service are much more likely to exit than other customers, although there is not a big difference in the number of customers with 'DSL' and 'Fiber optic'. This company may have some problems with its 'Fiber optic' connection. However, it is unwise to draw conclusions from a single variable, so let's also check 'MonthlyCharges'.
# In[29]:
train_df[['InternetService','MonthlyCharges']].groupby('InternetService').mean()
# 'Fiber optic' service is much more expensive than 'DSL' which may be one of the reasons why customers exit.
# In[30]:
fig, axes = plt.subplots(2, 3, figsize=(12, 7), sharey=True)
sns.countplot("StreamingTV", data=train_df, ax=axes[0,0])
sns.countplot("StreamingMovies", data=train_df, ax=axes[0,1])
sns.countplot("OnlineSecurity", data=train_df, ax=axes[0,2])
sns.countplot("OnlineBackup", data=train_df, ax=axes[1,0])
sns.countplot("DeviceProtection", data=train_df, ax=axes[1,1])
sns.countplot("TechSupport", data=train_df, ax=axes[1,2])
# In[31]:
train_df[['StreamingTV','Churn']].groupby('StreamingTV').mean()
# In[32]:
train_df[['StreamingMovies','Churn']].groupby('StreamingMovies').mean()
# In[33]:
train_df[['OnlineSecurity','Churn']].groupby('OnlineSecurity').mean()
# In[34]:
train_df[['OnlineBackup','Churn']].groupby('OnlineBackup').mean()
# In[35]:
train_df[['DeviceProtection','Churn']].groupby('DeviceProtection').mean()
# In[36]:
train_df[['TechSupport','Churn']].groupby('TechSupport').mean()
# All 'InternetService' related features seem to have different Attrition Rates for their classes.
# ### PhoneService
# In[37]:
train_df.PhoneService.value_counts()
# In[38]:
train_df.MultipleLines.value_counts()
# If a customer does not have a 'PhoneService', he/she cannot have 'MultipleLines'.
#
# The 'MultipleLines' column includes more specific data than the 'PhoneService' column, so we will not include 'PhoneService'; the number of people who have phone service can be inferred from 'MultipleLines'.
#
# 'MultipleLines' column takes the 'PhoneService' column one step further.
# In[39]:
train_df[['MultipleLines','Churn']].groupby('MultipleLines').mean()
# ### Contract and Payment Method
# In[40]:
plt.figure(figsize=(8,5))
sns.countplot("Contract", data=train_df)
# In[41]:
train_df[['Contract','Churn']].groupby('Contract').mean()
# As expected, customers with short-term Contracts are more likely to exit. This clearly explains the motivation for companies to build long-term relationships with their customers.
# In[42]:
plt.figure(figsize=(10,6))
sns.countplot("PaymentMethod", data=train_df)
# In[43]:
train_df[['PaymentMethod','Churn']].groupby('PaymentMethod').mean()
# ### Continuous Variables
# The continuous features are 'tenure', 'MonthlyCharges' and 'TotalCharges'. The amount in the 'TotalCharges' column is roughly proportional to 'tenure' (months) multiplied by 'MonthlyCharges', so it is unnecessary to include 'TotalCharges' in the model. Adding unnecessary features increases model complexity, and it is better to have a simpler model when possible. Complex models tend to overfit and not generalize well to new, previously unseen observations. Since the goal of a Machine Learning model is to predict or explain new observations, overfitting is a crucial issue. A quick sanity check of this proportionality follows.
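# A quick sanity check of the proportionality claim above (an addition, not from the
# original notebook). It assumes 'TotalCharges' has not been dropped yet, which is
# true at this point in the notebook.
approx_total = train_df['tenure'] * train_df['MonthlyCharges']
print(approx_total.corr(train_df['TotalCharges']))  # expected to be close to 1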
# In[44]:
#Distribution of continuous features
fig, axes = plt.subplots(1,2, figsize=(12, 7))
sns.distplot(train_df["tenure"], ax=axes[0])
sns.distplot(train_df["MonthlyCharges"], ax=axes[1])
# In[45]:
train_df[['tenure','MonthlyCharges','Churn']].groupby('Churn').mean()
# It is clear that people who have been customers for a long time tend to stay with the company. The average 'tenure' for people who left the company is 20 months less than the average for people who stay.
# It seems like 'MonthlyCharges' also have an effect on Attrition Rate.
# The 'Contract' and 'tenure' features may be correlated because customers with a long-term 'Contract' are likely to stay longer with the company. Let's check.
# In[46]:
train_df[['Contract','tenure']].groupby('Contract').mean()
# As expected, 'Contract' and 'tenure' are highly correlated. Customers with long-term Contracts have been customers for a longer time than customers with short-term Contracts. 'Contract' therefore adds little to no information beyond the 'tenure' feature, so we will not use 'Contract' in the model.
# After exploring the variables, we have decided not to use following variables because they add little or no informative power to the model:
# * customerID
# * gender
# * PhoneService
# * Contract
# * TotalCharges
# In[47]:
train_df.drop(['customerID','gender','PhoneService','Contract','TotalCharges'], axis=1, inplace=True)
cusID = test_df['customerID']
test_df.drop(['customerID','gender','PhoneService','Contract','TotalCharges'], axis=1, inplace=True)
# In[48]:
train_df.head()
# ## Feature Engineering
# Categorical features need to be converted to numbers so that they can be included in the calculations done by a Machine Learning model. The categorical variables in our data set are not ordinal (i.e. there is no order in them). For example, 'DSL' Internet Service is not superior to 'Fiber optic' Internet Service. An example of an ordinal categorical variable would be ratings from 1 to 5, or a variable with categories like 'bad', 'average' and 'good'.
# If we simply assigned a number to each category when encoding, categories with higher numbers could be treated as more important or affect the model more. Therefore, we encode the variables so that each category is represented by its own column whose value is 0 or 1 (one-hot encoding).
# We also need to scale the continuous variables. Otherwise, variables with larger values would be given more importance, which affects the accuracy of the model.
# In[49]:
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
# In[50]:
#One-Hot Encoding categorical variables of Train dataset
cat_features = ['SeniorCitizen', 'Partner', 'Dependents',
'MultipleLines', 'InternetService', 'OnlineSecurity',
'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV',
'StreamingMovies', 'PaperlessBilling', 'PaymentMethod']
train_df = pd.get_dummies(train_df, columns=cat_features, drop_first=True)
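# The remaining preprocessing is sketched below under the assumption that the test set gets
# the same one-hot encoding and that the two continuous features are scaled with a
# MinMaxScaler fitted on the training data only.
test_df = pd.get_dummies(test_df, columns=cat_features, drop_first=True)
num_features = ['tenure', 'MonthlyCharges']
scaler = MinMaxScaler()
train_df[num_features] = scaler.fit_transform(train_df[num_features])
test_df[num_features] = scaler.transform(test_df[num_features])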
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 12:39:17 2019
@author: nmei
This script systematically tests the encoding model (Ridge regression) with different
embedding features predicting the BOLD signal, within each of the small ROIs (15 in total).
"""
import os
import numpy as np
import pandas as pd
from shutil import copyfile
copyfile("../../../utils.py","utils.py")
import utils
from glob import glob
from tqdm import tqdm
from sklearn.utils import shuffle
from sklearn.model_selection import cross_validate
from sklearn import metrics,linear_model
from collections import OrderedDict
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
def AIC(y_true,y_pred,n_features):
VE = metrics.r2_score(y_true,y_pred,multioutput='raw_values')
VE[VE < 0] = np.nan
VE_mean = np.nanmean(VE)
aic = 2 * np.log(n_features) - 2 * np.log(VE_mean)
return aic
def AIC_corrected(y_true,y_pred,n_features,n_observations):
VE = metrics.r2_score(y_true,y_pred,multioutput='raw_values')
VE[VE < 0] = np.nan
VE_mean = np.nanmean(VE)
aic = 2 * np.log(n_features) - 2 * np.log(VE_mean)
aicc = aic + (2 * np.log(n_features))*(np.log(n_features) + 1) / (np.log(n_observations) - np.log(n_features) - 1)
return aicc
def BIC(y_true,y_pred,n_features,n_observations):
VE = metrics.r2_score(y_true,y_pred,multioutput='raw_values')
VE[VE < 0] = np.nan
VE_mean = np.nanmean(VE)
bic = np.log(n_features) * np.log(n_observations) - 2 * np.log(VE_mean)
return bic
def CP(y_true,y_pred,n_features,n_observations):
SSE = np.sum((y_true - y_pred)**2)
SS = np.sum((y_true - y_true.mean())**2)
cp = SSE/SS - np.log(n_observations) + 2 * np.log(n_features)
return cp
## parameters
experiment = 'metasema'
working_dir = '../../../../../{}/preprocessed_uncombined_with_invariant/'.format(experiment) # where the data locates
here = 'encoding_model_15_ROIs'
saving_dir = '../../../../results/{}/RP/{}'.format(experiment,here) # where the outputs will go
if not os.path.exists(saving_dir):
os.mkdir(saving_dir)
array_dir = '../../../../results/{}/RP/encoding_model_15_ROIs_arrays'.format(experiment)
if not os.path.exists(array_dir):
os.mkdir(array_dir)
image2vec_dir = '../../../../results/{}/img2vec_features'.format(experiment)
word2vec_dir = '../../../../results/{}/word2vec_features'.format(experiment)
label_map = dict(animal =[1,0],
tool =[0,1])
sub = '6735'# star means all subjects
average = True # averaging the trainig data
transfer = False # do I do domain adaptation
print_train = False # do I want to see the training process
concatenate = False # specifically for domain adaptation
n_splits = 300 # number of cross validation
n_jobs = 1 #
alpha = 100
# get the data file names
working_fmri = np.sort(glob(os.path.join(working_dir,'{}/*.npy'.format(sub))))
working_data = np.sort(glob(os.path.join(working_dir,'{}/*.csv'.format(sub))))
# get the encoding model features
image2vec_vecs = [pd.read_csv(f) for f in np.sort(glob(os.path.join(image2vec_dir, '*.csv')))]  # assumed continuation: load each image2vec feature file
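# Sketch of the cross-validated Ridge encoding step described in the module docstring.
# This is illustrative only: `X_feat` (an embedding feature matrix) and `y_bold`
# (the voxel responses of one ROI) are placeholders for the arrays built from the
# files loaded above.
def _score_encoding_model(X_feat, y_bold):
    from sklearn.model_selection import ShuffleSplit
    cv = ShuffleSplit(n_splits=n_splits, test_size=0.2, random_state=12345)
    reg = linear_model.Ridge(alpha=alpha, random_state=12345)
    res = cross_validate(reg, X_feat, y_bold, cv=cv, scoring='r2', n_jobs=n_jobs)
    return np.mean(res['test_score'])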
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 6
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input'])
pairs = pd.read_csv('VNC_interaction/data/pairs-2020-10-26.csv', header = 0) # import pairs
# %%
from connectome_tools.process_matrix import Adjacency_matrix, Promat
from datetime import date
VNC_adj = Adjacency_matrix(adj.values, adj.index, pairs, inputs,'axo-dendritic')
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
A1 = pymaid.get_skids_by_annotation('mw A1 neurons paired')
A1_MN = pymaid.get_skids_by_annotation('mw A1 MN')
A1_ascending = pymaid.get_skids_by_annotation('mw A1 neurons paired ascending')
A1_proprio = pymaid.get_skids_by_annotation('mw A1 proprio')
A1_somato = pymaid.get_skids_by_annotation('mw A1 somato')
A1_chord = pymaid.get_skids_by_annotation('mw A1 chordotonals')
A1_noci = pymaid.get_skids_by_annotation('mw A1 noci')
A1_external = pymaid.get_skids_by_annotation('mw A1 external sensories')
# %%
# comparison of motorneurons contained in each path
from tqdm import tqdm
from connectome_tools.cascade_analysis import Celltype, Celltype_Analyzer
threshold = 0.01
source_dVNC, ds_dVNC = VNC_adj.downstream(dVNC, threshold, exclude=dVNC)
edges, ds_dVNC_cleaned = VNC_adj.edge_threshold(source_dVNC, ds_dVNC, threshold, direction='downstream')
edges[edges.overthres==True]
source_dVNC_cleaned = np.unique(edges[edges.overthres==True].upstream_pair_id)
source_dVNC_pairs = VNC_adj.adj_inter.loc[(slice(None), source_dVNC_cleaned), :].index
source_dVNC_pairs = [x[2] for x in source_dVNC_pairs]
source_dVNC_pairs = Promat.extract_pairs_from_list(source_dVNC_pairs, pairs)[0]
source_dVNC_pair_paths = []
for index in tqdm(range(0, len(source_dVNC_pairs))):
ds_dVNC = VNC_adj.downstream_multihop(list(source_dVNC_pairs.loc[index]), threshold, min_members = 0, hops=5)
source_dVNC_pair_paths.append(ds_dVNC)
order = [16, 0, 2, 11, 1, 5, 7, 12, 13, 8, 3, 9, 10, 15, 4, 6, 14, 17, 18] # added 17 and 18 because there appear to be more dVNCs?
motor_layers,motor_skids = VNC_adj.layer_id(source_dVNC_pair_paths, source_dVNC_pairs.leftid, A1_MN)
motor_layers = motor_layers.iloc[order, :]
motor_skids = motor_skids.T.iloc[order, :]
motor_skids_allhops = []
for index in motor_skids.index:
skids_allhops = [x for sublist in motor_skids.loc[index].values for x in sublist if x!='']
motor_skids_allhops.append(skids_allhops)
motorneuron_celltypes = [Celltype(motor_layers.index[i], skids) for i, skids in enumerate(motor_skids_allhops)]
celltypes = Celltype_Analyzer(motorneuron_celltypes)
iou_matrix = celltypes.compare_membership()
'''
fig, axs = plt.subplots(
1, 1, figsize=(5, 5)
)
ax = axs
fig.tight_layout(pad=2.0)
sns.heatmap(iou_matrix, ax = ax, square = True)
'''
sns.clustermap(iou_matrix, figsize = (5, 5), square=True)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_individual_dVNC_paths_MN_comparison.pdf', bbox_inches='tight')
# %%
# same with ascendings
ascending_layers,ascending_skids = VNC_adj.layer_id(source_dVNC_pair_paths, source_dVNC_pairs.leftid, A1_ascending)
ascending_layers = ascending_layers.iloc[order, :]
ascending_skids = ascending_skids.T.iloc[order, :]
ascending_skids_allhops = []
for index in motor_skids.index:
skids_allhops = [x for sublist in ascending_skids.loc[index].values for x in sublist if x!='']
ascending_skids_allhops.append(skids_allhops)
ascending_celltypes = [Celltype(ascending_layers.index[i], skids) for i, skids in enumerate(ascending_skids_allhops)]
ascending_celltypes = Celltype_Analyzer(ascending_celltypes)
ascending_iou_matrix = ascending_celltypes.compare_membership()
'''
fig, axs = plt.subplots(
1, 1, figsize=(5, 5)
)
ax = axs
fig.tight_layout(pad=2.0)
sns.heatmap(ascending_iou_matrix, ax = ax, square = True)
'''
sns.clustermap(ascending_iou_matrix, figsize = (5, 5), square=True)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_individual_dVNC_paths_ascendings_comparison.pdf', bbox_inches='tight')
# %%
# multiple-hop connectivity matrix of dVNCs to motorneurons and ascendings
# **** should probably use these for some figure
# **** order MNs by muscle type in the future
A1_MN_pairs = Promat.extract_pairs_from_list(A1_MN, pairs)[0]
motor_layers,motor_skids = VNC_adj.layer_id(source_dVNC_pair_paths, source_dVNC_pairs.leftid, A1_MN)
motor_layers = motor_layers.iloc[order, :]
motor_skids = motor_skids.T.iloc[order, :]
dVNC_motor_mat, dVNC_motor_mat_plotting = VNC_adj.hop_matrix(motor_skids, source_dVNC_pairs.leftid[order], A1_MN_pairs.leftid)
annotations = dVNC_motor_mat.astype(int).astype(str)
annotations[annotations=='0']=''
sns.clustermap(dVNC_motor_mat_plotting, annot = annotations, fmt = 's',
row_cluster = False, cmap='Reds', figsize = (3.5, 3))
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_Hopwise_Connectivity_dVNC-motor_annots.pdf', bbox_inches='tight')
sns.clustermap(dVNC_motor_mat_plotting,
col_cluster = False, cmap='Reds', figsize = (3.5, 3), square = True)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_Hopwise_Connectivity_dVNC-motor.pdf', bbox_inches='tight')
# dVNC to ascendings multihop plot
ascending_pairs = Promat.extract_pairs_from_list(A1_ascending, pairs)[0]
ascending_layers,ascending_skids = VNC_adj.layer_id(source_dVNC_pair_paths, source_dVNC_pairs.leftid, A1_ascending)
ascending_layers = ascending_layers.iloc[order, :]
ascending_skids = ascending_skids.T.iloc[order, :]
dVNC_asc_mat, dVNC_asc_mat_plotting = VNC_adj.hop_matrix(ascending_skids, source_dVNC_pairs.leftid[order], ascending_pairs.leftid)
dVNC_asc_mat_plotting = dVNC_asc_mat_plotting.loc[:, (dVNC_asc_mat_plotting).sum(axis=0)>0]
sns.clustermap(dVNC_asc_mat_plotting,
row_cluster = False, cmap='Blues', figsize = (2, 3), square = True)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_Hopwise_Connectivity_dVNC-ascending.pdf', bbox_inches='tight')
# %%
# multihop plot of dVNCs to dVNCs via ascending neurons
# first, run 2-hop paths of each ascending
# second, identify ascendings neurons ds of each dVNC
# third, sum hops from dVNC->ascending and then ascending->new dVNC in brain
adj = pd.read_csv('VNC_interaction/data/brA1_axon-dendrite.csv', header = 0, index_col = 0)
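# Illustrative sketch of step three above (combining the two hop-count matrices); the variable
# names and the exact combination rule are assumptions, not taken from the original analysis.
# `dVNC_to_asc_hops`: dVNC pairs (rows) x ascending pairs (cols); `asc_to_dVNC_hops`:
# ascending pairs (rows) x dVNC pairs (cols); entries are hop counts, 0 meaning no path.
def _combine_hops(dVNC_to_asc_hops, asc_to_dVNC_hops):
    d2a = dVNC_to_asc_hops.values.astype(float)
    a2d = asc_to_dVNC_hops.values.astype(float)
    d2a[d2a == 0] = np.nan  # exclude "no path" entries from the minimum
    a2d[a2d == 0] = np.nan
    # total hops for every dVNC -> ascending -> dVNC route; keep the shortest route per pair
    total = np.nanmin(d2a[:, :, None] + a2d[None, :, :], axis=1)
    return pd.DataFrame(total, index=dVNC_to_asc_hops.index, columns=asc_to_dVNC_hops.columns)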
from multiprocessing import Process, Queue
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget
from PyQt5.QtWidgets import QAction, QTabWidget,QVBoxLayout, QFileDialog
import os
from pysilcam.config import PySilcamSettings
import pysilcam.oilgas as scog
import numpy as np
import pysilcam.postprocess as sc_pp
import pandas as pd
from enum import Enum
import pygame
import time
import psutil
from tqdm import tqdm
def get_data(self):
try:
rts = self.q.get(timeout=0.1)
except:
rts = None
return rts
def count_data(datadir):
silcfiles = [os.path.join(datadir, f) for f in
sorted(os.listdir(datadir))
if f.endswith('.silc')]
bmpfiles = [os.path.join(datadir, f) for f in
sorted(os.listdir(datadir))
if f.endswith('.bmp')]
silc = len(silcfiles)
bmp = len(bmpfiles)
return silc, bmp
def extract_stats_im(guidata):
imc = guidata['imc']
del guidata['imc']
stats = pd.DataFrame.from_dict(guidata)
return stats, imc
def export_timeseries(configfile, statsfile):
settings = PySilcamSettings(configfile)
print('Loading STATS data: ', statsfile)
stats = pd.read_csv(statsfile)
stats['timestamp'] = pd.to_datetime(stats['timestamp'])
stats.sort_values(by='timestamp', inplace=True)
print('Extracting oil and gas')
stats_oil = scog.extract_oil(stats)
stats_gas = scog.extract_gas(stats)
print('Calculating timeseries')
u = pd.to_datetime(stats['timestamp']).unique()
sample_volume = sc_pp.get_sample_volume(settings.PostProcess.pix_size, path_length=settings.PostProcess.path_length)
td = pd.to_timedelta('00:00:' + str(settings.PostProcess.window_size / 2.))
vdts_all = []
vdts_oil = []
vdts_gas = []
d50_all = []
d50_oil = []
d50_gas = []
timestamp = []
d50_av_all = []
d50_av_oil = []
d50_av_gas = []
gor = []
for s in tqdm(u):
timestamp.append(pd.to_datetime(s))
dt = pd.to_datetime(s)
dias, vd_all = sc_pp.vd_from_stats(stats[stats['timestamp'] == s],
settings.PostProcess)
dias, vd_oil = sc_pp.vd_from_stats(stats_oil[stats_oil['timestamp'] == s],
settings.PostProcess)
dias, vd_gas = sc_pp.vd_from_stats(stats_gas[stats_gas['timestamp'] == s],
settings.PostProcess)
nims = sc_pp.count_images_in_stats(stats[stats['timestamp'] == s])
sv = sample_volume * nims
vd_all /= sv
vd_oil /= sv
vd_gas /= sv
d50_all.append(sc_pp.d50_from_vd(vd_all, dias))
d50_oil.append(sc_pp.d50_from_vd(vd_oil, dias))
d50_gas.append(sc_pp.d50_from_vd(vd_gas, dias))
vdts_all.append(vd_all)
vdts_oil.append(vd_oil)
vdts_gas.append(vd_gas)
stats_av = stats[(stats['timestamp']<(dt+td)) & (stats['timestamp']>(dt-td))]
stats_av_oil = scog.extract_oil(stats_av)
stats_av_gas = scog.extract_gas(stats_av)
d50_av_all.append(sc_pp.d50_from_stats(stats_av, settings.PostProcess))
d50_av_oil.append(sc_pp.d50_from_stats(stats_av_oil, settings.PostProcess))
d50_av_gas.append(sc_pp.d50_from_stats(stats_av_gas, settings.PostProcess))
dias, vdts_av = sc_pp.vd_from_stats(stats_av, settings.PostProcess)
dias, vdts_av_oil = sc_pp.vd_from_stats(stats_av_oil, settings.PostProcess)
dias, vdts_av_gas = sc_pp.vd_from_stats(stats_av_gas, settings.PostProcess)
nims = sc_pp.count_images_in_stats(stats_av)
sv = sample_volume * nims
vdts_av /= sv
vdts_av_oil /= sv
vdts_av_gas /= sv
gor.append(np.sum(vdts_av_gas)/np.sum(vdts_av_oil))
outpath, outfile = os.path.split(statsfile)
outfile = outfile.replace('-STATS.csv','')
outfile = os.path.join(outpath, outfile)
time_series = pd.DataFrame(data=np.squeeze(vdts_all), columns=dias)
time_series['D50'] = d50_all
time_series['Time'] = timestamp
time_series.to_excel(outfile +
'-TIMESERIES' + '' + '.xlsx')
time_series = pd.DataFrame(data=np.squeeze(vdts_oil), columns=dias)
time_series['D50'] = d50_oil
time_series['Time'] = timestamp
time_series.to_excel(outfile +
'-TIMESERIES' + 'oil' + '.xlsx')
time_series = pd.DataFrame(data=np.squeeze(vdts_gas), columns=dias)
time_series['D50'] = d50_gas
time_series['Time'] = timestamp
time_series.to_excel(outfile +
'-TIMESERIES' + 'gas' + '.xlsx')
plt.figure(figsize=(20, 10))
if not np.min(np.isnan(d50_oil)):
plt.plot(timestamp, d50_oil, 'ro')
if not np.min(np.isnan(d50_av_oil)):
plt.plot(timestamp, d50_av_oil, 'r-')
lns1 = plt.plot(np.nan, np.nan, 'r-', label='OIL')
if not np.min(np.isnan(d50_gas)):
plt.plot(timestamp, d50_gas, 'bo')
if not np.min(np.isnan(d50_av_gas)):
plt.plot(timestamp, d50_av_gas, 'b-')
lns2 = plt.plot(np.nan, np.nan, 'b-', label='GAS')
plt.ylabel('d50 [um]')
plt.ylim(0, max(plt.gca().get_ylim()))
ax = plt.gca().twinx()
plt.sca(ax)
plt.ylabel('GOR')
if not np.min(np.isnan(gor)):
plt.plot(timestamp, gor, 'k')
lns3 = plt.plot(np.nan, np.nan, 'k', label='GOR')
plt.ylim(0, max(plt.gca().get_ylim()))
lns = lns1 + lns2 + lns3
labs = [l.get_label() for l in lns]
plt.legend(lns, labs)
plt.savefig(outfile +
'-d50_TimeSeries.png', dpi=600, bbox_inches='tight')
plt.close()
print('Export figure made. ')
print('Exporting averages... ')
# average all
dias, vd = sc_pp.vd_from_stats(stats,
settings.PostProcess)
nims = sc_pp.count_images_in_stats(stats)
sv = sample_volume * nims
vd /= sv
d50 = sc_pp.d50_from_vd(vd, dias)
dfa = pd.DataFrame(data=[vd], columns=dias)
dfa['d50'] = d50
timestamp = np.min(pd.to_datetime(stats['timestamp']))
dfa['Time'] = timestamp
dfa.to_excel(statsfile.replace('-STATS.csv', '') +
'-AVERAGE' + '' + '.xlsx')
#average oil
dias, vd = sc_pp.vd_from_stats(stats_oil,
settings.PostProcess)
vd /= sv # sample volume remains the same as 'all'
d50 = sc_pp.d50_from_vd(vd, dias)
dfa = pd.DataFrame(data=[vd], columns=dias)
dfa['d50'] = d50
timestamp = np.min(pd.to_datetime(stats['timestamp'])) # still use total stats for this time
dfa['Time'] = timestamp
dfa.to_excel(statsfile.replace('-STATS.csv', '') +
'-AVERAGE' + 'oil' + '.xlsx')
#average gas
dias, vd = sc_pp.vd_from_stats(stats_gas,
settings.PostProcess)
vd /= sv # sample volume remains the same as 'all'
d50 = sc_pp.d50_from_vd(vd, dias)
dfa = pd.DataFrame(data=[vd], columns=dias)
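# (assumed continuation, mirroring the 'all' and 'oil' blocks above)
dfa['d50'] = d50
dfa['Time'] = timestamp
dfa.to_excel(statsfile.replace('-STATS.csv', '') +
             '-AVERAGE' + 'gas' + '.xlsx')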
"""
Movie Recommendation Skill.
- movies like <movie-name>
"""
import numpy as np
import pandas as pd
from nltk import edit_distance
# Local Imports.
from backend.config import cosine_sim_scores_path, movie_data_path
def find_nearest_title(user_input_title):
"""
Checks for nearest movie title in dataset
Parameters
----------
user_input_title: str.
Returns
-------
nearest_title
"""
movies = pd.read_csv(movie_data_path)
movie_titles = movies["title"]
distances = {}
for titles in movie_titles:
distances[titles] = edit_distance(user_input_title, titles)
sorted_distances = sorted(distances.items(), key=lambda x: x[1], reverse=False)
nearest_title = sorted_distances[0][0]
return nearest_title
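# Note (added): edit_distance is computed against every catalogued title, which is fine for a
# small catalogue but O(N) per query. Illustrative usage with a hypothetical, misspelled query:
#   find_nearest_title("avengers endgme")  # -> closest matching title from movie_data_path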
def get_movie_plot(user_input_tokens):
"""
Returns movie's summary.
Parameters
----------
user_input_tokens: list.
Returns
-------
summary.
"""
# Process movie title from user.
user_input_title = user_input_tokens[1:]
user_input_title = ' '.join(user_input_title)
# Find nearest title.
movie_title = find_nearest_title(user_input_title)
movie_data = pd.read_csv(movie_data_path)
# Find Plot.
plot = movie_data[movie_data["title"] == movie_title]["summary"].values[0]
year_of_release = movie_data[movie_data["title"] == movie_title]["year_of_release"].values[0]
genre = movie_data[movie_data["title"] == movie_title]["genres"].values[0]
# Format Response.
movie_plot = f"{movie_title.capitalize()} ({year_of_release}, {genre}): {plot}"
return movie_plot
def get_recommendations(user_input_tokens):
"""
Computes Top 5 movie recommendation.
Parameters
----------
user_input_tokens: tokenized input.
Returns
-------
5 similar movies.
"""
# Process movie title from user input.
user_input_title = user_input_tokens[2:]
user_input_title = ' '.join(user_input_title)
movie_title = find_nearest_title(user_input_title)
# Read files from db.
movie_data = pd.read_csv(movie_data_path)
cosine_sim_scores = np.loadtxt(cosine_sim_scores_path)
# Construct titles dictionary.
titles = pd.Series(movie_data.index, index=movie_data["title"])
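# Assumed continuation (sketch): look up the matched title's row in the similarity matrix
# and return the five most similar titles, excluding the movie itself.
idx = titles[movie_title]
sim_scores = sorted(enumerate(cosine_sim_scores[idx]), key=lambda x: x[1], reverse=True)[1:6]
movie_indices = [i for i, _ in sim_scores]
recommendations = movie_data["title"].iloc[movie_indices].tolist()
return ", ".join(recommendations)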
import re
import io
import bs4
import csv
import copy
import urllib
import pandas as pd
import numpy as np
from .utils import *
from .web import *
from pyhelpers.dir import validate_input_data_dir
from pyhelpers.ops import confirmed, download_file_from_url, fake_requests_headers,update_nested_dict
from pyhelpers.store import load_pickle, save_pickle
from pyhelpers.text import find_similar_str
class OverpassDownloader():
def __init__(self):
self.Name = 'Overpass OpenStreetMap data extracts (more than 5000 data)'
self.URL = overpass_homepage()
self.ValidFileFormats = [".osm"]
def __get_download_url(self, box):
min_lat, max_lat, min_lon, max_lon = box
url = self.URL + str(min_lon) + ',' + str(min_lat) + ',' + str(max_lon) + ',' + str(max_lat)
return url
def __make_download_file(self, subarea_name, download_dir):
osm_filename = os.path.join(download_dir, subarea_name) + self.ValidFileFormats[0]
if os.path.isfile(osm_filename):
print("\"{}\" is already available at \"\\{}\".".format(
os.path.basename(osm_filename),
os.path.relpath(os.path.dirname(osm_filename))))
return True,osm_filename
return False,osm_filename
def download_osm_data(self,subarea_names=None,boxs=None,download_dir='osmfile',interval_sec=10,random_header=False,
ret_download_path=False):
# Validate the download directory
download_dir_ = validate_download_dir(download_dir)
# Keep track of the paths the files are downloaded to
download_paths = []
# Download by subarea name
if subarea_names:
# Normalise the input argument to a list
subarea_names_ = [subarea_names] if isinstance(subarea_names, str) else subarea_names.copy()
# Download the files one by one
for subarea_name in subarea_names_:
subarea_name_=subarea_name.replace(' ','_')
is_downloaded,path_to_file = self.__make_download_file(subarea_name_, download_dir_)
download_paths.append(path_to_file)
if not is_downloaded:
print('Downloading {}.osm to {}'.format(subarea_name_,os.path.relpath(os.path.dirname(path_to_file))))
box = get_subregion_download_range(subarea_name)
if box:
url = self.__get_download_url(box)
try:
download_osmfile_from_url(url=url,path_to_file=path_to_file,random_header=random_header)
print('Done.')
except Exception as e:
print("Failed. {}.".format(e))
if interval_sec and len(subarea_names_)>1:
time.sleep(interval_sec)
if boxs:
# 将输入参数转化为list
boxs_ = [boxs] if isinstance(boxs, tuple) else boxs.copy()
for id,box in enumerate(boxs_):
subbox_name='map_'+str(id+1)
print('Downloading {}.osm '.format(subbox_name))
is_downloaded,path_to_file = self.__make_download_file(subbox_name, download_dir_)
download_paths.append(path_to_file)
if not is_downloaded:
try:
url = self.__get_download_url(box)
download_osmfile_from_url(url=url,path_to_file=path_to_file,random_header=random_header)
print('Done.')
except Exception as e:
print("Failed. {}.".format(e))
if interval_sec and len(boxs_)>1:
time.sleep(interval_sec)
if ret_download_path:
return download_paths
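if __name__ == '__main__':
    # Minimal usage sketch (the bounding box below is an arbitrary example around central
    # Berlin, ordered as (min_lat, max_lat, min_lon, max_lon) as this class expects).
    _demo = OverpassDownloader()
    _demo.download_osm_data(boxs=[(52.50, 52.52, 13.36, 13.40)],
                            download_dir='osmfile', ret_download_path=True)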
class GeofabrikDownloader():
def __init__(self):
self.Name = 'Geofabrik OpenStreetMap data extracts'
self.URL = geofabrik_homepage()
self.DownloadIndexURL = urllib.parse.urljoin(self.URL, 'index-v1.json')
self.ValidFileFormats = [".osm.pbf", ".shp.zip", ".osm.bz2"]
self.DownloadIndexName = 'Geofabrik index of all downloads'
self.ContinentSubregionTableName = 'Geofabrik continent subregions'
self.RegionSubregionTier = 'Geofabrik region-subregion tier'
self.DownloadsCatalogue = 'Geofabrik downloads catalogue'
self.SubregionNameList = 'Geofabrik subregion name list'
@staticmethod
def get_raw_directory_index(url, verbose=False):
try:
import humanfriendly
raw_directory_index = pd.read_html(url, match='file', header=0,parse_dates=['date'])
raw_directory_index = pd.concat(raw_directory_index, ignore_index=True)
raw_directory_index.columns = [c.title() for c in raw_directory_index.columns]
# Clean the DataFrame
raw_directory_index.Size = raw_directory_index.Size.apply(humanfriendly.format_size)
raw_directory_index.sort_values('Date', ascending=False, inplace=True)
raw_directory_index.index = range(len(raw_directory_index))
raw_directory_index['FileURL'] = raw_directory_index.File.map(
lambda x: urllib.parse.urljoin(url, x))
except (urllib.error.HTTPError, TypeError, ValueError):
if len(urllib.parse.urlparse(url).path) <= 1 and verbose:
print("The web page does not have a raw directory index.")
raw_directory_index = None
return raw_directory_index
def __get_subregion_table(self, url, verbose=False):
try:
subregion_table = pd.read_html(
url, match=re.compile(r'(Special )?Sub[ \-]Regions?'), encoding='UTF-8')
subregion_table = pd.concat(subregion_table, axis=0, ignore_index=True)
# Specify column names
file_formats = self.ValidFileFormats
column_names = ['Subregion'] + file_formats
column_names.insert(2, '.osm.pbf.Size')
# Add column/names
if len(subregion_table.columns) == 4:
subregion_table.insert(2, '.osm.pbf.Size', np.nan)
subregion_table.columns = column_names
subregion_table.replace(
{'.osm.pbf.Size': {re.compile('[()]'): '', re.compile('\xa0'): ' '}},
inplace=True)
# Get the URLs
source = requests.get(url, headers=fake_requests_headers())
soup = bs4.BeautifulSoup(source.content, 'lxml')
source.close()
for file_type in file_formats:
text = '[{}]'.format(file_type)
urls = [urllib.parse.urljoin(url, link['href']) for link in
soup.find_all(name='a', href=True, text=text)]
subregion_table.loc[
subregion_table[file_type].notnull(), file_type] = urls
try:
subregion_urls = [
urllib.parse.urljoin(url, soup.find('a', text=text).get('href'))
for text in subregion_table.Subregion]
except (AttributeError, TypeError):
subregion_urls = [kml['onmouseover']
for kml in soup.find_all('tr', onmouseover=True)]
subregion_urls = [
s[s.find('(') + 1:s.find(')')][1:-1].replace('kml', 'html')
for s in subregion_urls]
subregion_urls = [urllib.parse.urljoin(url, sub_url)
for sub_url in subregion_urls]
subregion_table['SubregionURL'] = subregion_urls
column_names = list(subregion_table.columns)
column_names.insert(1, column_names.pop(len(column_names) - 1))
subregion_table = subregion_table[column_names]
subregion_table['.osm.pbf.Size'] = \
subregion_table['.osm.pbf.Size'].str.replace('(', '').str.replace(')', '')
subregion_table = subregion_table.where(pd.notnull(subregion_table), None)
except (ValueError, TypeError, ConnectionRefusedError, ConnectionError):
# No more data available for subregions within the region
if verbose:
print("Checked out \"{}\".".format(
url.split('/')[-1].split('.')[0].title()))
subregion_table = None
return subregion_table
def __get_download_index(self, update=False, confirmation_required=True, verbose=False):
path_to_download_index = cd_dat(self.DownloadIndexName.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_download_index) and not update:
download_index = load_pickle(path_to_download_index)
else:
if confirmed("To get {}?".format(self.DownloadIndexName),
confirmation_required=confirmation_required):
if verbose == 2:
print("Collecting {}".format(self.DownloadIndexName), end=" ... ")
try:
import geopandas as gpd
download_index_ = gpd.read_file(self.DownloadIndexURL)
# Note that '<br />' exists in all the names of Poland' subregions
download_index_.name = download_index_.name.str.replace('<br />', ' ')
urls = download_index_.urls.map(
lambda x: pd.DataFrame.from_dict(x, 'index').T)
urls_ = pd.concat(urls.values, ignore_index=True)
download_index = pd.concat([download_index_, urls_], axis=1)
print("Done. ") if verbose == 2 else ""
save_pickle(download_index, path_to_download_index, verbose=verbose)
except Exception as e:
print("Failed. {}.".format(e))
download_index = None
else:
download_index = None
if verbose:
print("No data of {} is available.".format(self.DownloadIndexName))
return download_index
def __get_continents_subregion_tables(self, update=False, confirmation_required=True, verbose=False):
path_to_pickle =cd_dat(self.ContinentSubregionTableName.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_pickle) and not update:
subregion_tables = load_pickle(path_to_pickle)
else:
if confirmed("To collect information of {}?".format(
self.ContinentSubregionTableName),
confirmation_required=confirmation_required):
if verbose == 2:
print("Collecting a table of {}".format(
self.ContinentSubregionTableName), end=" ... ")
try:
# Scan the homepage to collect info of regions for each continent
source = requests.get(self.URL, headers=fake_requests_headers())
soup = bs4.BeautifulSoup(source.text, 'lxml').find_all(
'td', {'class': 'subregion'})
source.close()
continent_names = [td.a.text for td in soup]
continent_links = [urllib.parse.urljoin(self.URL, td.a['href'])
for td in soup]
subregion_tables = dict(
zip(continent_names,
[self.__get_subregion_table(url, verbose)
for url in continent_links]))
print("Done. ") if verbose == 2 else ""
save_pickle(subregion_tables, path_to_pickle, verbose=verbose)
except Exception as e:
print("Failed. {}.".format(e))
subregion_tables = None
else:
subregion_tables = None
if verbose:
print(f"No data of {self.ContinentSubregionTableName} is available.")
return subregion_tables
def __get_region_subregion_tier(self, update=False, confirmation_required=True,verbose=False):
path_to_file = cd_dat(self.RegionSubregionTier.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_file) and not update:
region_subregion_tier, non_subregions = load_pickle(path_to_file, verbose=verbose)
else:
def compile_region_subregion_tier(sub_reg_tbls):
having_subregions = sub_reg_tbls.copy()
region_subregion_tiers = having_subregions.copy()
non_subregions_list = []
for k, v in sub_reg_tbls.items():
if v is not None and isinstance(v, pd.DataFrame):
                        region_subregion_tiers = update_nested_dict(sub_reg_tbls, {k: set(v.Subregion)})
else:
non_subregions_list.append(k)
for x in non_subregions_list:
having_subregions.pop(x)
having_subregions_temp = copy.deepcopy(having_subregions)
while having_subregions_temp:
for region_name, subregion_table in having_subregions.items():
subregion_names = subregion_table.Subregion
subregion_links = subregion_table.SubregionURL
sub_subregion_tables = dict(
zip(subregion_names,
[self.__get_subregion_table(link)
for link in subregion_links]))
                        subregion_index, without_subregion_ = \
                            compile_region_subregion_tier(sub_subregion_tables)
non_subregions_list += without_subregion_
region_subregion_tiers.update({region_name: subregion_index})
having_subregions_temp.pop(region_name)
# Russian Federation in both pages of Asia and Europe,
# so there are duplicates in non_subregions_list
import more_itertools
non_subregions_list = list(more_itertools.unique_everseen(non_subregions_list))
return region_subregion_tiers, non_subregions_list
if confirmed("To compile {}? (Note this may take up to a few minutes.)".format(self.RegionSubregionTier),
confirmation_required=confirmation_required):
if verbose == 2:
print("Compiling {} ... ".format(self.RegionSubregionTier), end="")
# Scan the download pages to collect a catalogue of region-subregion tier
try:
subregion_tables = self.__get_continents_subregion_tables(update=update)
region_subregion_tier, non_subregions = compile_region_subregion_tier(subregion_tables)
print("Done. ") if verbose == 2 else ""
save_pickle((region_subregion_tier, non_subregions), path_to_file,
verbose=verbose)
except Exception as e:
print("Failed. {}.".format(e))
region_subregion_tier, non_subregions = None, None
else:
region_subregion_tier, non_subregions = None, None
if verbose:
print("No data of {} is available.".format(self.RegionSubregionTier))
return region_subregion_tier, non_subregions
def __get_download_catalogue(self, update=False, confirmation_required=True, verbose=False):
path_to_downloads_catalogue = cd_dat(self.DownloadsCatalogue.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_downloads_catalogue) and not update:
subregion_downloads_catalogue = load_pickle(path_to_downloads_catalogue)
else:
if confirmed("To collect {}? (Note that it may take a few minutes.)".format(self.DownloadsCatalogue),
confirmation_required=confirmation_required):
if verbose == 2:
print("Collecting {}".format(self.DownloadsCatalogue), end=" ... ")
try:
source = requests.get(self.URL, headers=fake_requests_headers())
soup = bs4.BeautifulSoup(source.text, 'lxml')
source.close()
subregion_href = soup.find_all('td', {'class': 'subregion'})
avail_subregion_urls = (urllib.parse.urljoin(self.URL, td.a['href'])
for td in subregion_href)
avail_subregion_url_tables_0 = (self.__get_subregion_table(sub_url, verbose)
for sub_url in avail_subregion_urls)
avail_subregion_url_tables = [tbl for tbl in avail_subregion_url_tables_0 if tbl is not None]
subregion_url_tables = list(avail_subregion_url_tables)
while subregion_url_tables:
subregion_url_tables_ = []
for subregion_url_table in subregion_url_tables:
subregion_urls = list(subregion_url_table.SubregionURL)
subregion_url_tables_0 = [
self.__get_subregion_table(sr_url, verbose)
for sr_url in subregion_urls]
subregion_url_tables_ += [
tbl for tbl in subregion_url_tables_0 if tbl is not None]
avail_subregion_url_tables += subregion_url_tables_
subregion_url_tables = list(subregion_url_tables_)
# All available URLs for downloading
home_subregion_url_table = self.__get_subregion_table(self.URL)
avail_subregion_url_tables.append(home_subregion_url_table)
subregion_downloads_catalogue = pd.concat(avail_subregion_url_tables,
ignore_index=True)
subregion_downloads_catalogue.drop_duplicates(inplace=True)
duplicated = subregion_downloads_catalogue[subregion_downloads_catalogue.Subregion.duplicated(keep=False)]
if not duplicated.empty:
import humanfriendly
                        # step through the duplicated entries in pairs
                        for i in range(0, len(duplicated), 2):
temp = duplicated.iloc[i:i + 2]
size = temp['.osm.pbf.Size'].map(lambda x: humanfriendly.parse_size(
x.strip('(').strip(')').replace('\xa0', ' ')))
idx = size[size == size.min()].index
subregion_downloads_catalogue.drop(idx, inplace=True)
subregion_downloads_catalogue.index = range(len(subregion_downloads_catalogue))
# Save subregion_index_downloads to local disk
save_pickle(subregion_downloads_catalogue,
path_to_downloads_catalogue, verbose=verbose)
except Exception as e:
print("Failed. {}.".format(e))
subregion_downloads_catalogue = None
else:
subregion_downloads_catalogue = None
if verbose:
print("No data of {} is available.".format(self.DownloadsCatalogue))
return subregion_downloads_catalogue
    def __get_list_of_subregion_names(self, update=False, confirmation_required=True, verbose=False):
path_to_name_list = cd_dat(self.SubregionNameList.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_name_list) and not update:
subregion_name_list = load_pickle(path_to_name_list)
else:
if confirmed("To get {}?".format(self.SubregionNameList),confirmation_required=confirmation_required):
downloads_catalogue = self.__get_download_catalogue(update=update, confirmation_required=False)
subregion_name_list = downloads_catalogue.Subregion.to_list()
save_pickle(subregion_name_list, path_to_name_list, verbose=verbose)
else:
subregion_name_list = []
if verbose:
print("No data of {} is available.".format(self.SubregionNameList))
return subregion_name_list
def __validate_input_subregion_name(self, subregion_name):
assert isinstance(subregion_name, str)
# Get a list of available
subregion_names = self.__get_list_of_subregion_names()
if os.path.isdir(os.path.dirname(subregion_name)) or urllib.parse.urlparse(subregion_name).path:
subregion_name_ = find_similar_str(os.path.basename(subregion_name),
subregion_names)
else:
subregion_name_ = find_similar_str(subregion_name, subregion_names)
if not subregion_name_:
raise ValueError(
"The input subregion name is not identified.\n"
"Check if the required subregion exists in the catalogue and retry.")
return subregion_name_
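    # Usage sketch (hypothetical inputs; assumes find_similar_str performs fuzzy matching
    # against the cached subregion name list):
    #
    # >>> self.__validate_input_subregion_name('great britain')   # doctest: +SKIP
    # 'Great Britain'
    # >>> self.__validate_input_subregion_name('london')          # doctest: +SKIP
    # 'Greater London'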
def __validate_input_file_format(self, osm_file_format):
osm_file_format_ = find_similar_str(osm_file_format, self.ValidFileFormats)
        assert osm_file_format_ in self.ValidFileFormats, \
            "The input file format must be one from {}.".format(self.ValidFileFormats)
return osm_file_format_
    def __get_subregion_download_url(self, subregion_name, osm_file_format, update=False, verbose=False):
# Get an index of download URLs
subregion_downloads_index = self.__get_download_catalogue(update=update, verbose=verbose)
subregion_downloads_index.set_index('Subregion', inplace=True)
subregion_name_ = self.__validate_input_subregion_name(subregion_name)
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
# Get the URL
download_url = subregion_downloads_index.loc[subregion_name_, osm_file_format_]
return subregion_name_, download_url
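    # Sketch (hypothetical values, URL truncated): for a valid pair of inputs this
    # resolves to the canonical subregion name plus a direct download link, e.g.
    #
    # >>> self.__get_subregion_download_url('rutland', 'pbf')     # doctest: +SKIP
    # ('Rutland', 'https://download.geofabrik.de/.../rutland-latest.osm.pbf')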
def __get_default_osm_filename(self, subregion_name, osm_file_format, update=False):
subregion_name_ = self.__validate_input_subregion_name(subregion_name)
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
_, download_url = self.__get_subregion_download_url(
subregion_name_, osm_file_format_, update=update)
if download_url is None:
print("No {} data is available to download for {}.".format(
osm_file_format_, subregion_name_))
else:
subregion_filename = os.path.split(download_url)[-1]
return subregion_filename
    def __get_default_path_to_osm_file(self, subregion_name, osm_file_format, mkdir=False, update=False, verbose=False):
subregion_name_ = self.__validate_input_subregion_name(subregion_name)
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
subregion_name_, download_url = self.__get_subregion_download_url(
subregion_name_, osm_file_format_, update=update)
if download_url is None:
if verbose:
print("{} data is not available for {}".format(
osm_file_format_, subregion_name_))
default_filename, default_file_path = None, None
else:
parsed_path = urllib.parse.urlparse(download_url).path.lstrip('/').split('/')
if len(parsed_path) == 1:
parsed_path = [subregion_name_] + parsed_path
subregion_names = self.__get_list_of_subregion_names()
directory = cd_dat_geofabrik(*[find_similar_str(x, subregion_names) if x != 'us' else 'United States'
for x in parsed_path[0:-1]],
mkdir=mkdir)
default_filename = parsed_path[-1]
default_file_path = os.path.join(directory, default_filename)
return default_filename, default_file_path
def __search_for_subregions(self, *subregion_name, deep=False):
region_subregion_tier, non_subregions_list = self.__get_region_subregion_tier()
if not subregion_name:
subregion_names = non_subregions_list
else:
def find_subregions(reg_name, reg_sub_idx):
for k, v in reg_sub_idx.items():
if reg_name == k:
if isinstance(v, dict):
yield list(v.keys())
else:
yield [reg_name] if isinstance(reg_name, str) else reg_name
elif isinstance(v, dict):
for sub in find_subregions(reg_name, v):
if isinstance(sub, dict):
yield list(sub.keys())
else:
yield [sub] if isinstance(sub, str) else sub
res = []
for region in subregion_name:
                res += list(find_subregions(self.__validate_input_subregion_name(region),
                                            region_subregion_tier))[0]
if not deep:
subregion_names = res
else:
check_list = [x for x in res if x not in non_subregions_list]
if check_list:
res_ = list(set(res) - set(check_list))
res_ += self.__search_for_subregions(*check_list)
else:
res_ = res
del non_subregions_list, region_subregion_tier, check_list
subregion_names = list(dict.fromkeys(res_))
return subregion_names
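    # Toy walk-through (not from the original source) of the find_subregions generator
    # defined above: the tier is a nested dict mapping a region either to another dict
    # of sub-tiers or to a leaf entry.
    #
    # >>> tier = {'Europe': {'France': {'Alsace', 'Bretagne'}, 'Monaco': 'Monaco'}}
    # >>> list(find_subregions('Europe', tier))   # doctest: +SKIP
    # [['France', 'Monaco']]
    # >>> list(find_subregions('France', tier))   # doctest: +SKIP
    # [['France']]
    #
    # A region whose value is a dict yields its immediate subregion names, while a leaf
    # entry yields itself; deep=True re-runs the search on any result that still has
    # subregions of its own.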
def __make_sub_download_dir(self, subregion_name, osm_file_format, download_dir=None, mkdir=False):
subregion_name_ = self.__validate_input_subregion_name(subregion_name)
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
default_filename, default_file_path = self.__get_default_path_to_osm_file(
subregion_name_, osm_file_format_)
if not default_filename:
            default_sub_dir = re.sub(r"[. ]", "-", subregion_name_.lower() + osm_file_format_)
else:
default_sub_dir = re.sub(r"[. ]", "-", default_filename).lower()
if not download_dir:
            default_download_dir = cd_dat_geofabrik(os.path.dirname(default_file_path),
                                                    default_sub_dir, mkdir=mkdir)
else:
            default_download_dir = cd(validate_input_data_dir(download_dir),
                                      default_sub_dir, mkdir=mkdir)
return default_download_dir
def download_osm_data(self, subregion_names, osm_file_format, download_dir='osmfile',
update=False, confirmation_required=False, deep_retry=False,
                          interval_sec=10, verbose=False, random_header=False, ret_download_path=False):
subregion_names_ = [subregion_names] if isinstance(subregion_names, str) else subregion_names.copy()
subregion_names_ = [self.__validate_input_subregion_name(x) for x in subregion_names_]
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
if confirmed(
"Confirmed to download {} data of the following geographic region(s):"
"\n\t{}\n?".format(osm_file_format_, "\n\t".join(subregion_names_)),
confirmation_required=confirmation_required):
download_paths = []
for sub_reg_name in subregion_names_:
# Get download URL
subregion_name_, download_url = self.__get_subregion_download_url(
sub_reg_name, osm_file_format_)
if download_url is None:
if verbose:
print("The {} data is not found for \"{}\".".format(
osm_file_format_, subregion_name_))
if confirmed("Try downloading the data of its subregions instead",
confirmation_required=confirmation_required):
sub_subregions = self.__search_for_subregions(
subregion_name_, deep=deep_retry)
if sub_subregions == [subregion_name_]:
print("No {} data is available "
"for this geographic region.".format(osm_file_format_))
break
else:
if not download_dir:
_, path_to_file_ = self.__get_default_path_to_osm_file(
subregion_name_, ".osm.pbf")
download_dir = os.path.dirname(path_to_file_)
download_dir_ = self.__make_sub_download_dir(
subregion_name_, osm_file_format_, download_dir)
self.download_osm_data(
sub_subregions, osm_file_format=osm_file_format_,
download_dir=download_dir_, update=update,
confirmation_required=False, verbose=verbose,
ret_download_path=ret_download_path)
else:
if not download_dir:
# Download the requested OSM file to default directory
osm_filename, path_to_file = self.__get_default_path_to_osm_file(
subregion_name_, osm_file_format_, mkdir=True)
else:
download_dir_ = validate_input_data_dir(download_dir)
osm_filename = self.__get_default_osm_filename(
subregion_name_, osm_file_format=osm_file_format_)
path_to_file = os.path.join(download_dir_, osm_filename)
download_paths.append(path_to_file)
if os.path.isfile(path_to_file) and not update:
if verbose:
print("\"{}\" is already available at \"\\{}\".".format(
os.path.basename(path_to_file),
os.path.relpath(os.path.dirname(path_to_file))))
else:
if verbose:
print("{} \"{}\" to \"\\{}\" ... ".format(
"Updating" if os.path.isfile(path_to_file)
else "Downloading",
osm_filename,
os.path.relpath(os.path.dirname(path_to_file))))
try:
                            download_file_from_url(url=download_url, path_to_file=path_to_file,
                                                   wait_to_retry=interval_sec,
                                                   random_header=random_header)
print("Done. ") if verbose else ""
except Exception as e:
print("Failed. {}.".format(e))
if interval_sec:
time.sleep(interval_sec)
if ret_download_path:
if len(download_paths) == 1:
download_paths = download_paths[0]
return download_paths
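    # Usage sketch (hypothetical names and paths; the enclosing class is assumed to be
    # instantiated as a Geofabrik downloader object, class name assumed here):
    #
    # >>> downloader = GeofabrikDownloader()                      # doctest: +SKIP
    # >>> path = downloader.download_osm_data(
    # ...     'Rutland', '.osm.pbf', download_dir='osm_data',
    # ...     verbose=True, ret_download_path=True)               # doctest: +SKIP
    #
    # With ret_download_path=True a single request returns the local file path itself
    # rather than a one-element list.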
    def __osm_file_exists(self, subregion_name, osm_file_format, data_dir=None, update=False,
verbose=False, ret_file_path=False):
subregion_name_ = self.__validate_input_subregion_name(subregion_name)
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
default_filename, path_to_file = self.__get_default_path_to_osm_file(
subregion_name_, osm_file_format_)
if data_dir:
path_to_file = cd(validate_input_data_dir(data_dir), default_filename)
if os.path.isfile(path_to_file) and not update:
if verbose == 2:
print("\"{}\" of {} is available at \"{}\".".format(
default_filename, subregion_name_,
os.path.relpath(os.path.dirname(path_to_file))))
if ret_file_path:
return path_to_file
else:
return True
else:
return False
def download_subregion_data(self, subregion_names, osm_file_format, download_dir=None, update=False,
verbose=False, ret_download_path=False):
subregion_names_ = [subregion_names] if isinstance(subregion_names, str) else subregion_names.copy()
subregion_names_ = [self.__validate_input_subregion_name(x) for x in subregion_names_]
subregion_names_ = self.__search_for_subregions(*subregion_names_)
subregion_name_list = subregion_names_.copy()
osm_file_format_ = self.__validate_input_file_format(osm_file_format)
for subregion_name in subregion_names_:
if self.__osm_file_exists(subregion_name, osm_file_format_, download_dir, update):
subregion_name_list.remove(subregion_name)
        confirmation_required_ = bool(subregion_name_list)
if confirmed(
"Confirmed to download {} data of the following geographic region(s): "
"\n\t{}\n?".format(osm_file_format_, "\n\t".join(subregion_name_list)),
confirmation_required=confirmation_required_):
download_paths = self.download_osm_data(
subregion_names_, osm_file_format=osm_file_format_,
download_dir=download_dir, update=update, confirmation_required=False,
verbose=verbose, ret_download_path=ret_download_path)
if ret_download_path:
if len(download_paths) == 1:
download_paths = download_paths[0]
return download_paths
class BBBikeDownloader():
def __init__(self):
self.Name = 'BBBike OpenStreetMap data extracts'
self.URL = bbbike_homepage()
self.URLCities = 'https://raw.githubusercontent.com/wosch/bbbike-world/world/etc/cities.txt'
self.CitiesNames = 'BBBike cities'
self.URLCitiesCoordinates = 'https://raw.githubusercontent.com/wosch/bbbike-world/world/etc/cities.csv'
self.CitiesCoordinates = 'BBBike cities coordinates'
self.SubregionCatalogue = 'BBBike subregion catalogue'
self.SubregionNameList = 'BBBike subregion name list'
self.DownloadDictName = 'BBBike download dictionary'
def __get_list_of_cities(self, update=False, confirmation_required=True, verbose=False):
path_to_pickle = cd_dat(self.CitiesNames.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_pickle) and not update:
cities_names = load_pickle(path_to_pickle)
else:
if confirmed("To collect {}?".format(self.CitiesNames),confirmation_required=confirmation_required):
try:
cities_names_ = pd.read_csv(self.URLCities, header=None)
cities_names = list(cities_names_.values.flatten())
save_pickle(cities_names, path_to_pickle, verbose=verbose)
except Exception as e:
print("Failed. {}.".format(e))
cities_names = None
else:
if verbose:
print("No data of \"{}\" is available.".format(self.CitiesNames))
cities_names = None
return cities_names
def __get_coordinates_of_cities(self, update=False, confirmation_required=True, verbose=False):
path_to_pickle = cd_dat(self.CitiesCoordinates.replace(" ", "-") + ".pickle")
if os.path.isfile(path_to_pickle) and not update:
cities_coordinates = load_pickle(path_to_pickle)
else:
if confirmed("To collect {}?".format(self.CitiesCoordinates),
confirmation_required=confirmation_required):
try:
csv_temp = urllib.request.urlopen(self.URLCitiesCoordinates)
                    csv_file = list(csv.reader(io.StringIO(csv_temp.read().decode('utf-8')),
                                               delimiter=':'))
csv_data = [[x.strip().strip('\u200e').replace('#', '') for x in row] for row in csv_file[5:-1]]
column_names = [x.replace('#', '').strip().capitalize() for x in csv_file[0]]
cities_coords = pd.DataFrame(csv_data, columns=column_names)
coordinates = cities_coords.Coord.str.split(' ').apply(pd.Series)
                    coords_cols = ['ll_longitude', 'll_latitude', 'ur_longitude', 'ur_latitude']
coordinates.columns = coords_cols
cities_coords.drop(['Coord'], axis=1, inplace=True)
                    cities_coordinates = pd.concat([cities_coords, coordinates], axis=1)
                    save_pickle(cities_coordinates, path_to_pickle, verbose=verbose)
                except Exception as e:
                    print("Failed. {}.".format(e))
                    cities_coordinates = None
            else:
                if verbose:
                    print("No data of \"{}\" is available.".format(self.CitiesCoordinates))
                cities_coordinates = None
        return cities_coordinates
from abc import abstractmethod
from datetime import timedelta
import numpy as np
import pandas as pd
from src import constants
from src.data_generator.day_ahead_extractors.base_day_ahead_extractor import BaseDayAheadExtractor
from src.data_generator.day_ahead_extractors.utils.mappings import ACTUAL_MAPPING, FORECAST_MAPPING
class PseDataDayAheadExtractor(BaseDayAheadExtractor):
def extract(self) -> pd.DataFrame:
data = self.raw_df.copy()
data = data.replace('-', np.NaN)
for column in data.columns:
data[column] = data[column].apply(self.delete_unnecessary_commas_and_add_dot)
if 'Godzina' in data.columns:
data_with_timestamps = self._handle_time_shift(data)
data_with_timestamps = self._get_datetime_from_dates_and_hours(data_with_timestamps)
else:
repeated_data = pd.DataFrame(pd.to_datetime(data['Data']).repeat(24))
repeated_data = repeated_data.sort_values(by='Data').reset_index(drop=True)
            # Spread each repeated date across the 24 hours of the day. Vectorised,
            # because assigning to rows yielded by iterrows() does not write back
            # to the underlying DataFrame.
            repeated_data['Data'] = repeated_data['Data'] + pd.to_timedelta(
                repeated_data.index % 24, unit='h')
repeated_data = repeated_data.rename(columns={'Data': 'date'})
repeated_data['Data'] = repeated_data['date'].apply(lambda x: x.strftime('%Y-%m-%d'))
data_with_timestamps = pd.merge(repeated_data, data, on='Data')
data_with_timestamps = data_with_timestamps.drop(columns=['Data'])
data_indexed = data_with_timestamps.set_index('date')
mapping = FORECAST_MAPPING if self._get_prediction_flag() else ACTUAL_MAPPING
data_transformed = data_indexed.rename(columns=mapping)
return data_transformed
@staticmethod
def delete_unnecessary_commas_and_add_dot(x):
try:
commas_quantity = x.count(',')
if commas_quantity > 1:
for _ in range(commas_quantity - 1):
comma_position = x.find(',')
x = x[:comma_position] + x[comma_position + 1:]
x = x.replace(',', '.')
except AttributeError:
pass
return x
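    # Worked example (illustrative): values scraped from the PSE tables use a comma as
    # the decimal separator, sometimes with extra commas as thousands separators, so
    #
    # >>> PseDataDayAheadExtractor.delete_unnecessary_commas_and_add_dot('1,234,5')
    # '1234.5'
    # >>> PseDataDayAheadExtractor.delete_unnecessary_commas_and_add_dot(7)
    # 7
    #
    # Non-string inputs are returned unchanged via the AttributeError fallback.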
def _get_date_column(self) -> str:
return 'Data'
def _get_datetime_from_dates_and_hours(self, data: pd.DataFrame) -> pd.DataFrame:
date_column = self._get_date_column()
data[date_column] = pd.to_datetime(data[date_column].astype(str))
data['date'] = data.apply(
lambda row: row[date_column] + timedelta(hours=int(row['Godzina']) - 1),
axis=1,
)
data = data.drop(columns=[date_column, 'Godzina'])
return data
@abstractmethod
def _get_prediction_flag(self) -> bool:
raise NotImplementedError(constants.METHOD_NOT_IMPLEMENTED)
def _handle_time_shift(self, data: pd.DataFrame) -> pd.DataFrame:
if any(data['Godzina'] == '2A'):
data = data[data['Godzina'] != '2A']
data['Godzina'] = data['Godzina'].astype('int')
unique_dates = data[self._get_date_column()].unique()
for date in unique_dates:
existing_hours = data.loc[data[self._get_date_column()] == date, 'Godzina'].unique()
if len(existing_hours) == 23:
# 2 is always missing
if self.__class__.__name__ == 'RealUnitsOutagesDayAheadExtractor':
adjacent_data = data.loc[
(data[self._get_date_column()] == date) & (data['Godzina'] == 1)
]
adjacent_data['Godzina'] = 2
else:
adjacent_data = data.loc[
(data[self._get_date_column()] == date) & (data['Godzina'].isin([1, 3]))
]
for column in adjacent_data.columns:
adjacent_data[column] = \
adjacent_data[column].apply(pd.to_numeric, errors='ignore')
adjacent_data = adjacent_data.groupby(by=[self._get_date_column()]).mean()
adjacent_data = adjacent_data.reset_index()
                data = pd.concat([data, adjacent_data])
        return data
import errno
import json
import logging
import os
import shutil
import uuid
import zipfile
import re
import subprocess
import pandas as pd
import plotly.express as px
from plotly.offline import plot
import plotly.graph_objs as go
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
class MDSUtils:
R_BIN = '/kb/deployment/bin'
MDS_OUT_DIR = 'mds_output'
PARAM_IN_WS = 'workspace_name'
PARAM_IN_MATRIX = 'input_obj_ref'
PARAM_OUT_MATRIX = 'mds_matrix_name'
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_run_mds_params(self, params):
"""
_validate_run_mds_params:
validates params passed to run_mds method
"""
logging.info('start validating run_mds params')
# check for required parameters
for p in [self.PARAM_IN_MATRIX, self.PARAM_IN_WS, self.PARAM_OUT_MATRIX]:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _build_rMDS_script(self, params):
"""
_build_rMDS_script: build a sequence of R command calls according to params
Note: To run the NMDS, we will use the function metaMDS from the vegan package.
# The metaMDS function requires only a community-by-species matrix.
"""
data_file_path = params.get('datafile')
if not data_file_path:
return ''
exists = os.path.isfile(os.path.join(self.output_dir, os.path.basename(data_file_path)))
if not exists:
shutil.copyfile(data_file_path,
os.path.join(self.output_dir, os.path.basename(data_file_path)))
associated_matrix_file = params.get('associated_matrix_file')
n_components = params.get('n_components', 2)
max_iter = params.get('max_iter', 300)
run_metric = True if params.get('metric', 0) else False
dist_metric = params.get('distance_metric', 'bray')
mds_cfg = 'distance="' + dist_metric + '",try=20,trymax=' + str(max_iter) + \
',autotransform=TRUE,noshare=0.1,expand=TRUE,trace=1,' + \
'plot=FALSE,engine=c("monoMDS","isoMDS"),k=' + str(n_components)
        if run_metric:
            # append as a proper comma-separated R argument (R boolean literal)
            mds_cfg += ',metric=TRUE'
mds_scrpt = 'library(vegan)\n'
mds_scrpt += 'library(jsonlite)\n'
mds_scrpt += 'vg_data <- read.table("' + data_file_path + \
'",header=TRUE,row.names=1,sep="")\n'
# remove the last (taxonomy) column
# mds_scrpt += 'vg_data<-vg_data[,1:dim(vg_data)[2]-1]\n'
# Function metaMDS returns an object of class metaMDS.
mds_scrpt += 'vg_data.mds <- metaMDS(vg_data,' + mds_cfg + ')\n'
mds_scrpt += 'vg_data.mds\n'
# save the results in the memory
# 1) store species ordination
mds_scrpt += 'variableScores <- vg_data.mds$species\n'
# 2) store site ordination
mds_scrpt += 'sampleScores <- vg_data.mds$points\n'
# 3) store other ordination results
mds_scrpt += 'stress <- vg_data.mds$stress\n'
mds_scrpt += 'dist_metric <- vg_data.mds$distance\n'
mds_scrpt += 'dist_matrix <- vg_data.mds$diss\n'
mds_scrpt += 'dist_call <- vg_data.mds$distcall\n'
mds_scrpt += 'converged <- vg_data.mds$converged\n'
mds_scrpt += 'dims <- vg_data.mds$ndim\n'
mds_scrpt += 'tries <- vg_data.mds$tries\n'
mds_scrpt += 'maxits <- vg_data.mds$maxits\n'
# save the results to the current dir
# Write CSV in R
mds_scrpt += 'write.csv(dist_matrix,file="dist_matrix.csv",row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(variableScores,file="species_ordination.csv",' + \
'row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(sampleScores,file="site_ordination.csv",row.names=TRUE,na="")\n'
if associated_matrix_file:
mds_scrpt += 'chem_data <- read.table("' + associated_matrix_file + \
'",header=TRUE,row.names=1,sep="")\n'
mds_scrpt += '(fit <- envfit(vg_data.mds,chem_data,perm=999))\n'
mds_scrpt += 'vectors <- scores(fit, "vectors")\n'
mds_scrpt += 'write.csv(vectors,file="vectors.csv",row.names=TRUE,na="")\n'
# Write JSON in R
mds_scrpt += 'item_name=c("stress","distance_metric","dist_call","converged",' + \
'"dimesions","trials","maxits")\n'
mds_scrpt += 'item_value=c(stress,dist_metric,dist_call,converged,dims,tries,maxits)\n'
mds_scrpt += 'df <- data.frame(item_name,item_value,stringsAsFactors=FALSE)\n'
mds_scrpt += 'write_json(toJSON(df),path="others.json",pretty=TRUE,auto_unbox=FALSE)\n'
# If there is user input plotting script:
plt_scrpt = params.get('plot_script', '').lower()
        if plt_scrpt and re.match(r"^plot\(\s*[a-zA-Z]+.*\)$", plt_scrpt):
arr_plt = plt_scrpt.split(',')
arr_plt[0] = 'plot(vg_data.mds' # make sure to pass the correct data
plt_scrpt = (',').join(arr_plt)
if len(arr_plt) == 1:
plt_scrpt += ')'
plt_type = params.get('plot_type', 'pdf').lower()
if not plt_type:
plt_type = 'pdf'
plt_name = params.get('plot_name', 'usr_plt_name').lower()
if not plt_name:
plt_name = 'usr_plt_name'
plt_name += '.' + plt_type
if plt_type == 'jpg':
plt_type = 'jpeg'
if plt_type == 'ps':
plt_type = 'postscript'
            # open exactly one graphics device for the requested output type
            if plt_type == 'tiff':
                mds_scrpt += plt_type
                mds_scrpt += '(file="' + plt_name + '",width=4,height=4,units="in",' + \
                             'compression="lzw",res=300)\n'
            elif plt_type in ['jpeg', 'bmp', 'png']:
                mds_scrpt += plt_type
                mds_scrpt += '(file="' + plt_name + '",width=580,height=580,units="px",' + \
                             'res=100, pointsize=12)\n'
            else:
                mds_scrpt += plt_type
                mds_scrpt += '(file="' + plt_name + '")\n'
mds_scrpt += plt_scrpt + '\n'
if associated_matrix_file:
mds_scrpt += 'plot(fit)\n'
mds_scrpt += 'dev.off()\n'
logging.info('R script: {}'.format(mds_scrpt))
mds_rscript = 'mds_script.R'
rscrpt_file_path = os.path.join(self.output_dir, mds_rscript)
with open(rscrpt_file_path, 'w') as r_file:
r_file.write(mds_scrpt)
return rscrpt_file_path
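    # For orientation (paraphrased, not verbatim output): with n_components=2 and the
    # default Bray-Curtis distance the generated R file boils down to
    #
    #   library(vegan); library(jsonlite)
    #   vg_data <- read.table("<datafile>", header=TRUE, row.names=1, sep="")
    #   vg_data.mds <- metaMDS(vg_data, distance="bray", try=20, trymax=300, ..., k=2)
    #   write.csv(...)   # dist_matrix / species_ordination / site_ordination
    #   write_json(...)  # stress, distance metric, convergence flags
    #
    # plus an optional envfit() block and a user-supplied plot() call.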
def _execute_r_script(self, rfile_name):
"""
_execute_r_script: Calling the Rscript executable to run the R script in rfile_name
"""
logging.info('Calling R......')
result_dir = os.path.dirname(rfile_name)
if not result_dir:
result_dir = self.working_dir
rcmd = [os.path.join(self.R_BIN, 'Rscript')]
rcmd.append(rfile_name)
logging.info('Running metaMDS script in current working directory: {}'.format(result_dir))
exitCode = 0
try:
complete_proc = subprocess.run(rcmd, cwd=result_dir, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
exitCode = complete_proc.returncode
if (exitCode == 0):
logging.info('\n{}'.format(complete_proc.stdout))
logging.info('\n{} was executed successfully, exit code was: {}'.format(
' '.join(rcmd), str(exitCode)))
logging.info("Finished calling R.")
else:
logging.info('Error running command: {} Exit Code: {}'.format(' '.join(rcmd),
str(exitCode)))
logging.info('\n{}'.format(complete_proc.stderr))
logging.info('\n{}'.format(complete_proc.stdout))
except subprocess.CalledProcessError as sub_e:
exitCode = -99
logging.info('Caught subprocess.CalledProcessError {}'.format(sub_e))
logging.info('created files in {}:\n{}'.format(result_dir, os.listdir(result_dir)))
return exitCode
def _df_to_list(self, df):
"""
_df_to_list: convert Dataframe to FloatMatrix2D matrix data
"""
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
df.fillna(0, inplace=True)
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
return matrix_data
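    # Example (illustrative): the conversion simply splits a frame into ids and values,
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame([[1.0, 2.0]], index=['s1'], columns=['otu1', 'otu2'])
    # >>> utils._df_to_list(df)                # doctest: +SKIP  (utils: an MDSUtils instance)
    # {'row_ids': ['s1'], 'col_ids': ['otu1', 'otu2'], 'values': [[1.0, 2.0]]}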
def _save_mds_matrix(self, workspace_name, input_obj_ref, mds_matrix_name,
distance_df, mds_params_df, site_ordin_df, species_ordin_df):
logging.info('Saving MDSMatrix...')
if not isinstance(workspace_name, int):
ws_name_id = self.dfu.ws_name_to_id(workspace_name)
else:
ws_name_id = workspace_name
mds_data = {}
mds_data.update({'distance_matrix': self._df_to_list(distance_df)})
mds_data.update({'site_ordination': self._df_to_list(site_ordin_df)})
mds_data.update({'species_ordination': self._df_to_list(species_ordin_df)})
mds_data.update({'mds_parameters': self._df_to_list(mds_params_df)})
mds_data.update({'original_matrix_ref': input_obj_ref})
mds_data.update({'rotation_matrix': self._df_to_list(distance_df)})
obj_type = 'KBaseExperiments.PCAMatrix'
info = self.dfu.save_objects({
"id": ws_name_id,
"objects": [{
"type": obj_type,
"data": mds_data,
"name": mds_matrix_name
}]
})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _zip_folder(self, folder_path, output_path):
"""
_zip_folder: Zip the contents of an entire folder (with that folder included in the
archive). Empty subfolders could be included in the archive as well if the 'Included
all subfolders, including empty ones' portion.
portion is used.
"""
with zipfile.ZipFile(output_path, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as ziph:
for root, folders, files in os.walk(folder_path):
# Include all subfolders, including empty ones.
for folder_name in folders:
absolute_fpath = os.path.join(root, folder_name)
relative_fpath = os.path.join(os.path.basename(root), folder_name)
logging.info("Adding folder {} to archive.".format(absolute_fpath))
ziph.write(absolute_fpath, relative_fpath)
for f in files:
absolute_path = os.path.join(root, f)
relative_path = os.path.join(os.path.basename(root), f)
logging.info("Adding file {} to archive.".format(absolute_path))
ziph.write(absolute_path, relative_path)
logging.info("{} created successfully.".format(output_path))
def _generate_output_file_list(self, out_dir):
"""
_generate_output_file_list: zip result files and generate file_links for report
"""
logging.info('Start packing result files from MDS...')
output_files = list()
output_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(output_dir)
mds_output = os.path.join(output_dir, 'metaMDS_output.zip')
self._zip_folder(out_dir, mds_output)
output_files.append({'path': mds_output,
'name': os.path.basename(mds_output),
'label': os.path.basename(mds_output),
'description': 'Output file(s) generated by metaMDS'})
return output_files
def _generate_mds_html_report(self, mds_outdir, n_components):
logging.info('Start generating html report for MDS results...')
html_report = list()
mds_plots = list()
for root, folders, files in os.walk(mds_outdir):
# Find the image files by their extensions.
for f in files:
                if re.match(r'^[a-zA-Z]+.*\.html$', f):  # jpeg|jpg|bmp|png|tiff|pdf|ps|
absolute_path = os.path.join(root, f)
logging.info("Adding file {} to plot archive.".format(absolute_path))
mds_plots.append(absolute_path)
result_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(result_dir)
result_file_path = os.path.join(result_dir, 'mds_result.html')
visualization_content = ''
for mds_plot in mds_plots:
shutil.copy2(mds_plot,
os.path.join(result_dir, os.path.basename(mds_plot)))
visualization_content += '<iframe height="900px" width="100%" '
visualization_content += 'src="{}" '.format(os.path.basename(mds_plot))
visualization_content += 'style="border:none;"></iframe>\n<p></p>\n'
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'mds_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('n_components',
'{} Components'.format(n_components))
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': result_dir,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for MDS Matrix App'
})
return html_report
def _generate_mds_report(self, mds_ref, output_dir, workspace_name, n_components):
logging.info('Creating MDS report...')
output_files = self._generate_output_file_list(output_dir)
output_html_files = self._generate_mds_html_report(output_dir, n_components)
objects_created = list()
objects_created.append({'ref': mds_ref,
'description': 'MDS Matrix'})
report_params = {'message': '',
'workspace_name': workspace_name,
'file_links': output_files,
'objects_created': objects_created,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 666,
'report_object_name': 'kb_mds_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _get_asso_matrix_meta(self, associated_matrix_obj_ref, dimension, scale_size_by,
mdf_indx, mdf):
logging.info('Getting metadata from associated matrix')
matrix_data = self.dfu.get_objects({
'object_refs': [associated_matrix_obj_ref]})['data'][0]['data']['data']
size_data = list()
if dimension == 'col':
size_index = matrix_data['row_ids'].index(scale_size_by)
for sample in mdf_indx:
if sample in matrix_data['col_ids']:
idx = matrix_data['col_ids'].index(sample)
size_data.append(matrix_data['values'][size_index][idx])
else:
size_data.append(None)
else:
size_index = matrix_data['col_ids'].index(scale_size_by)
for sample in mdf_indx:
if sample in matrix_data['row_ids']:
idx = matrix_data['row_ids'].index(sample)
size_data.append(matrix_data['values'][idx][size_index])
else:
size_data.append(None)
mdf[scale_size_by] = size_data
def _get_asso_matrix_meta_obj_size_only(self, associated_matrix_obj_ref, dimension,
scale_size_by, color_marker_by):
logging.info('Getting metadata from associated matrix')
matrix_data = self.dfu.get_objects({
'object_refs': [associated_matrix_obj_ref]})['data'][0]['data']['data']
if dimension == 'col':
size_index = matrix_data['row_ids'].index(scale_size_by)
size_data = matrix_data['values'][size_index]
mdf = pd.DataFrame(index=matrix_data['col_ids'],
columns=[color_marker_by, scale_size_by])
mdf[scale_size_by] = size_data
else:
size_index = matrix_data['col_ids'].index(scale_size_by)
size_data = list()
for value in matrix_data['values']:
size_data.append(value[size_index])
mdf = pd.DataFrame(index=matrix_data['col_ids'],
columns=[color_marker_by, scale_size_by])
mdf[scale_size_by] = size_data
return mdf
def _get_attribute_meta(self, attr_obj, attr_l, scale_size_by, mdf_indx, mdf):
logging.info('Getting metadata from attribute mapping')
size_index = None
for i in range(len(attr_l)):
if attr_l[i]['attribute'] == scale_size_by:
size_index = i
break
size_data = []
for sample in mdf_indx:
try:
size_data.append(
float(
attr_obj['data'][0]['data']['instances'][sample][size_index]))
except Exception:
logging.info(
'ERROR: scaling is not int or float. scaling has been dropped')
scale_size_by = None
size_index = None
if size_index is not None:
mdf[scale_size_by] = size_data
def _get_attribute_meta_size_only(self, attribute_mapping_obj_ref,
scale_size_by, color_marker_by):
logging.info('Getting metadata from attribute mapping')
attr_obj = self.dfu.get_objects({'object_refs': [attribute_mapping_obj_ref]})
attr_l = attr_obj['data'][0]['data']['attributes']
size_index = None
for i in range(len(attr_l)):
if attr_l[i]['attribute'] == scale_size_by:
size_index = i
break
size_data = []
mdf_indx = attr_obj['data'][0]['data']['instances'].keys()
for sample in mdf_indx:
try:
size_data.append(float(
attr_obj['data'][0]['data']['instances'][sample][size_index]))
except Exception:
err_msg = 'ERROR: scaling is not int or float. scaling has been dropped'
logging.info(err_msg)
scale_size_by = None
size_index = None
mdf = pd.DataFrame(index=mdf_indx,
columns=[color_marker_by, scale_size_by])
if size_index is not None:
mdf[scale_size_by] = size_data
return mdf
def _get_metadata_from_obj(self, dimension,
associated_matrix_obj_ref, attribute_mapping_obj_ref,
color_marker_by, scale_size_by):
logging.info('Retrieving metadata..')
# build color_marker_by only
if color_marker_by is not None:
attr_obj = self.dfu.get_objects({'object_refs': [attribute_mapping_obj_ref]})
attr_l = attr_obj['data'][0]['data']['attributes']
color_index = None
for i in range(len(attr_l)):
if attr_l[i]['attribute'] == color_marker_by:
color_index = i
break
color_data = []
mdf_indx = attr_obj['data'][0]['data']['instances'].keys()
for sample in mdf_indx:
color_data.append(attr_obj['data'][0]['data']['instances'][sample][color_index])
mdf = pd.DataFrame(index=mdf_indx, columns=[color_marker_by, scale_size_by])
if color_index is not None:
mdf[color_marker_by] = color_data
if scale_size_by is not None:
if associated_matrix_obj_ref is not None:
try:
self._get_asso_matrix_meta(associated_matrix_obj_ref, dimension,
scale_size_by, mdf_indx, mdf)
except Exception:
self._get_attribute_meta(attr_obj, attr_l, scale_size_by, mdf_indx, mdf)
else:
self._get_attribute_meta(attr_obj, attr_l, scale_size_by, mdf_indx, mdf)
# build scale_size_by only
else:
if associated_matrix_obj_ref is not None:
try:
mdf = self._get_asso_matrix_meta_obj_size_only(associated_matrix_obj_ref,
dimension, scale_size_by,
color_marker_by)
except Exception:
mdf = self._get_attribute_meta_size_only(attribute_mapping_obj_ref,
scale_size_by, color_marker_by)
else:
mdf = self._get_attribute_meta_size_only(attribute_mapping_obj_ref,
scale_size_by, color_marker_by)
logging.info('created metadata df:\n{}'.format(mdf))
return mdf
def _get_metadata_from_file(self, metadata_file, color_marker_by, scale_size_by):
"""
Get metadata from file and return simplified pd.DataFrame
:return:
"""
logging.info('Retrieving metadata..')
mdf = pd.read_csv(metadata_file, sep='\t', index_col=0)
logging.info('MDF: {}'.format(mdf))
mdf = mdf[[color_marker_by, scale_size_by]]
return mdf
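    # The metadata file is assumed to be a tab-separated table with sample ids in the
    # first column and one column per attribute, e.g. (hypothetical values):
    #
    #   sample_id    treatment    depth_m
    #   s1           control      0.5
    #   s2           exposed      1.5
    #
    # so _get_metadata_from_file('meta.tsv', 'treatment', 'depth_m') keeps just the two
    # requested columns, indexed by sample id.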
def _plot_without_grouping(self, dimension):
# Get site data from previously saved file
site_ordin_df = pd.read_csv(os.path.join(self.output_dir, "site_ordination.csv"),
index_col=0)
logging.info('SITE_ORDIN_DF:\n {}'.format(site_ordin_df))
site_ordin_df.fillna('na', inplace=True)
fig = px.scatter(site_ordin_df, x="MDS1", y="MDS2", hover_name=site_ordin_df.index)
# Save plotly_fig.html and return path
plotly_html_file_path = os.path.join(self.output_dir, "plotly_fig.html")
plot(fig, filename=plotly_html_file_path)
return plotly_html_file_path
def _plot_with_grouping(self, dimension, associated_matrix_obj_ref, attribute_mapping_obj_ref,
metadata_file, color_marker_by, scale_size_by, highlight,
only_highlight):
logging.info('Plotting with grouping: "{}", and "{}"'.format(color_marker_by,
scale_size_by))
# Both can not be the same right now.. mdf is now new pd would lead to problems
if color_marker_by == scale_size_by:
logging.info('ERROR: both color and scale are same field. scale set to None')
scale_size_by = None
if (attribute_mapping_obj_ref is not None or
associated_matrix_obj_ref is not None):
mdf = self._get_metadata_from_obj(dimension,
associated_matrix_obj_ref,
attribute_mapping_obj_ref,
color_marker_by,
scale_size_by)
elif metadata_file is not None:
mdf = self._get_metadata_from_file(metadata_file, color_marker_by, scale_size_by)
else:
raise ValueError('No metadata file was specified')
grouping_meta_file = os.path.join(self.output_dir, 'grouping_meta.csv')
with open(grouping_meta_file, 'w') as m_file:
mdf.to_csv(m_file, sep='\t')
# Get site data from previously saved file
site_ordin_file = os.path.join(self.output_dir, "site_ordination.csv")
if not os.path.exists(site_ordin_file):
raise ValueError('failed to generate metaMDS points')
site_ordin_df = pd.read_csv(site_ordin_file, index_col=0)
logging.info('SITE_ORDIN_DF:\n {}'.format(site_ordin_df))
# Check if metadata file is valid for this method
for sample in site_ordin_df.index:
try:
mdf.loc[sample]
except KeyError:
raise KeyError('One or more samples in site_ordination is not found in chosen '
'metadata obj. If you ran this using files, you might need to '
'transpose the data in your files so samples are rows and OTU '
'are columns.')
# Fill site_ordin_df with metadata from mdf
site_ordin_df['color'] = None
site_ordin_df['size'] = None
for ID in site_ordin_df.index:
            site_ordin_df.loc[ID, 'color'] = mdf[color_marker_by].loc[ID]
            site_ordin_df.loc[ID, 'size'] = mdf[scale_size_by].loc[ID]
site_ordin_df.fillna('na', inplace=True)
# Plot
if color_marker_by is not None and scale_size_by is not None and all(
isinstance(x, (int, float)) for x in list(site_ordin_df['size'])):
fig = px.scatter(site_ordin_df, x="MDS1", y="MDS2", color="color", size="size",
hover_name=site_ordin_df.index)
elif color_marker_by is not None:
fig = px.scatter(site_ordin_df, x="MDS1", y="MDS2", color="color",
hover_name=site_ordin_df.index)
elif scale_size_by is not None:
fig = px.scatter(site_ordin_df, x="MDS1", y="MDS2", size="size",
hover_name=site_ordin_df.index)
# add vectors
vector_file = os.path.join(self.output_dir, "vectors.csv")
if os.path.exists(vector_file):
vector_df = pd.read_csv(vector_file, index_col=0)
logging.info('VECTOR_DF:\n {}'.format(vector_df))
loading_x, loading_y, loading_text = list(), list(), list()
highlight_x, highlight_y, highlight_text = list(), list(), list()
for idx, row in vector_df.iterrows():
x, y, name = row[0], row[1], idx
if name in highlight:
highlight_x.extend([0, x])
highlight_y.extend([0, y])
highlight_text.extend(['0', name])
fig.add_annotation(x=x, y=y, ax=0, ay=0, xanchor="center", yanchor="bottom",
text=name, font=dict(color="mediumvioletred"))
else:
loading_x.extend([0, x])
loading_y.extend([0, y])
loading_text.extend(['0', name])
if not (highlight and only_highlight):
fig.add_trace(go.Scatter(
x=loading_x,
y=loading_y,
mode="lines+markers",
name="environmental vectors",
text=loading_text,
textposition="bottom center",
line=dict(color="RoyalBlue", width=0.5)
))
fig.add_trace(go.Scatter(
x=highlight_x,
y=highlight_y,
mode="lines+markers",
name="selected environmental vectors",
text=highlight_text,
textposition="bottom center",
line=dict(color="mediumvioletred", width=1.5)
))
# Save plotly_fig.html and return path
plotly_html_file_path = os.path.join(self.output_dir, "plotly_fig.html")
plot(fig, filename=plotly_html_file_path)
return plotly_html_file_path
def __init__(self, config):
self.ws_url = config["workspace-url"]
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.scratch = config['scratch']
self.working_dir = self.scratch
self.dfu = DataFileUtil(self.callback_url)
self.output_dir = os.path.join(self.working_dir, self.MDS_OUT_DIR)
self._mkdir_p(self.output_dir)
def run_metaMDS(self, params):
"""
run_metaMDS: perform metaMDS analysis on matrix
:param input_obj_ref: object reference of a matrix
:param workspace_name: the name of the workspace
:param mds_matrix_name: name of MDS (KBaseExperiments.MDSMatrix) object
:param n_components - dimentionality of the reduced space (default 2)
:param max_iter: maximum iterations allowed
:param metric: indication of running metric or non-metric MDS
:param distance_metric: distance the ordination will be performed on, default to "bray"
"""
logging.info('--->\nrunning metaMDS with input\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self._validate_run_mds_params(params)
input_obj_ref = params.get(self.PARAM_IN_MATRIX)
workspace_name = params.get(self.PARAM_IN_WS)
mds_matrix_name = params.get(self.PARAM_OUT_MATRIX)
n_components = int(params.get('n_components', 2))
dimension = params.get('dimension', 'col')
res = self.dfu.get_objects({'object_refs': [input_obj_ref]})['data'][0]
obj_data = res['data']
obj_name = res['info'][1]
obj_type = res['info'][2]
max_size = len(obj_data['data']['col_ids'])
if n_components > max_size:
raise ValueError('Number of components should be less than number of samples')
exitCode = -99
if "KBaseMatrices" in obj_type:
# create the input file from obj_data
matrix_tab = obj_data['data']['values']
row_ids = obj_data['data']['row_ids']
col_ids = obj_data['data']['col_ids']
            matrix_df = pd.DataFrame(matrix_tab, index=row_ids, columns=col_ids)