prompt | completion | api
---|---|---
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
from functools import partial
import pandas as pd
import pytest
from unittest.mock import Mock, patch
import requests
from bzfunds import constants
from bzfunds.data import *
from bzfunds.dbm import *
from bzfunds.utils import get_url_from_date
# Globals
date_str = "2021-01-01"
date = pd.to_datetime(date_str)
manager = Manager()
get_history = partial(get_history, manager=manager, commit=False)
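# `partial` pre-binds the Manager instance and disables DB commits, so the tests
# below can call `get_history(start, end)` without writing to the database.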
def test_get_monthly_data_is_typed():
with pytest.raises(TypeError, match=".*datetime.*"):
get_monthly_data(123)
get_monthly_data(date_str)
get_monthly_data({})
def test_get_monthly_data_only_parses_successful_response():
errors = (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.HTTPError,
)
mocked_get = Mock(side_effect=errors)
with patch("bzfunds.data.requests.get", mocked_get):
assert get_monthly_data(date) is None
assert get_monthly_data(date) is None
assert get_monthly_data(date) is None
def test_get_history_is_typed():
with pytest.raises(TypeError, match=".*datetime.*"):
get_history(123, 456)
get_history(date_str, date_str)
get_history({}, [])
def test_get_history_date_range():
d1, d2 = pd.to_datetime(["1910-9-1", "1910-12-1"])
d3, d4 = pd.to_datetime(["2110-1-1", "2110-3-1"])
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://www.reddit.com/r/CoronavirusUS/comments/fqx8fn/ive_been_working_on_this_extrapolation_for_the/
# to explore : https://github.com/fcpenha/Gompertz-Makehan-Fit/blob/master/script.py
# Import required packages
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.dates as mdates
import copy, math
from lmfit import Model
import pandas as pd
import streamlit as st
import datetime as dt
from datetime import datetime, timedelta
import matplotlib.animation as animation
import imageio
import streamlit.components.v1 as components
import os
import platform
import webbrowser
from pandas import read_csv, Timestamp, Timedelta, date_range
from io import StringIO
from numpy import log, exp, sqrt, clip, argmax, put
from scipy.special import erfc, erf
from matplotlib.pyplot import subplots
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import ConciseDateFormatter, AutoDateLocator
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from PIL import Image
import glob
# Functions to calculate values a,b and c ##########################
def exponential(x, a, b, c):
''' Standard Gompertz function
a = height, b = halfway point, c = growth rate
https://en.wikipedia.org/wiki/Gompertz_function '''
return a * np.exp(-b * np.exp(-c * x))
def derivate(x, a, b, c):
''' First derivative of the Gompertz function. Might contain an error '''
return (np.exp(b * (-1 * np.exp(-c * x)) - c * x) * a * b * c ) + BASEVALUE
#return a * b * c * np.exp(-b*np.exp(-c*x))*np.exp(-c*x)
def derivate_of_derivate(x,a,b,c):
return a*b*c*(b*c*exp(-c*x) - c)*exp(-b*exp(-c*x) - c*x)
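# Sanity note: differentiating the Gompertz curve a*exp(-b*exp(-c*x)) gives
# a*b*c*exp(-c*x)*exp(-b*exp(-c*x)), which matches `derivate` above up to the
# added BASEVALUE offset. BASEVALUE and the other globals used further down
# (e.g. OUTPUT_DIR, from_, prepare_for_animation) are assumed to be defined
# elsewhere in the full script.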
def gaussian(x, a, b, c):
''' Standard Gaussian function. Doesn't give results; not in use '''
return a * np.exp(-np.power(x - b, 2) / (2 * np.power(c, 2)))
def gaussian_2(x, a, b, c):
''' Another Gaussian function; in use.
a = height, b = center (?), c = width '''
return a * np.exp(-((x - b) ** 2) / c)
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
Doesnt work. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
# https://replit.com/@jsalsman/COVID19USlognormals
def lognormal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * erfc(- (log(x) - mu) / (s * sqrt(2)))
# https://en.wikipedia.org/wiki/Log-normal_distribution#Cumulative_distribution_function
def normal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * (1 + erf((x - mu) / (s * sqrt(2))))
# #####################################################################
def find_gaussian_curvefit(x_values, y_values):
try:
popt_g2, pcov_g2 = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[0, 0, 0],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
except RuntimeError as e:
str_e = str(e)
st.error(f"gaussian fit :\n{str_e}")
popt_g2 = (np.nan, np.nan, np.nan)  # fallback so the return below does not fail
return tuple(popt_g2)
def use_curvefit(x_values, x_values_extra, y_values, title, daterange,i):
"""
Use the curve-fit from scipy.
IN : x- and y-values. The ___-extra are for "predicting" the curve
"""
with _lock:
st.subheader(f"Curvefit (scipy) - {title}")
fig1x = plt.figure()
try:
a_start, b_start, c_start = 0,0,0
popt, pcov = curve_fit(
f=exponential,
xdata=x_values,
ydata=y_values,
#p0=[4600, 11, 0.5],
p0 = [a_start, b_start, c_start ], # ICU beds, March-April
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
exponential(x_values_extra, *popt),
"r-",
label="exponential fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Exponential fit :\n{str_e}")
try:
popt_d, pcov_d = curve_fit(
f=derivate,
xdata=x_values,
ydata=y_values,
#p0=[0, 0, 0],
p0 = [a_start, b_start, c_start ], # ICU beds, March-April
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
derivate(x_values_extra, *popt_d),
"g-",
label="derivate fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_d),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Derivate fit :\n{str_e}")
# FIXIT
# try:
# popt_growth, pcov_growth = curve_fit(
# f=growth,
# xdata=x_values,
# ydata=y_values,
# p0=[500, 0.0001],
# bounds=(-np.inf, np.inf),
# maxfev=10000,
# )
# plt.plot(
# x_values_extra,
# growth(x_values_extra, *popt_growth),
# "y-",
# label="growth: a=%5.3f, b=%5.3f" % tuple(popt_growth),
# )
# except:
# st.write("Error with growth model fit")
try:
popt_g, pcov_g = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[a_start, b_start, c_start ],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
gaussian_2(x_values_extra, *popt_g),
"b-",
label="gaussian fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_g),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Gaussian fit :\n{str_e}")
plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")
plt.legend()
plt.title(f"{title} / curve_fit (scipy)")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
# ATTEMPT TO GET DATES ON THE X-AXIS (TOFIX)
# plt.xlim(daterange[0], daterange[-1])
# lay-out of the x axis
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
# interval_ = 5
# plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=interval_))
# plt.gcf().autofmt_xdate()
#plt.show()
filename= (f"{OUTPUT_DIR}scipi_{title}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1x)
# def make_gif(filelist):
# # Create the frames
# frames = []
# imgs = glob.glob("*.png")
# for i in imgs:
# new_frame = Image.open(i)
# frames.append(new_frame)
#
# # Save into a GIF file that loops forever
# frames[0].save('png_to_gif.gif', format='GIF',
# append_images=frames[1:],
# save_all=True,
# duration=300, loop=0)
def use_lmfit(x_values, y_values, functionlist, title,i, max_y_values):
"""
Use lmfit.
IN : x- and y-values.
functionlist (which functions to use)
adapted from https://stackoverflow.com/a/49843706/4173718
TODO: Make all graphs in one graph
"""
a_start, b_start, c_start = 0,0,0
for function in functionlist:
#placeholder0.subheader(f"LMFIT - {title} - {function}")
# create a Model from the model function
if function == "exponential":
bmodel = Model(exponential)
formula = "a * np.exp(-b * np.exp(-c * x))"
elif function == "derivate":
bmodel = Model(derivate)
formula = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "gaussian":
bmodel = Model(gaussian_2)
formula = "a * np.exp(-((x - b) ** 2) / c)"
else:
st.write("Please choose a function")
st.stop()
# create Parameters, giving initial values
#params = bmodel.make_params(a=4711, b=12, c=0.06)
params = bmodel.make_params(a=a_start, b=b_start, c=c_start) # ICU beds, March-April
# params = bmodel.make_params()
params["a"].min = a_start
params["b"].min = b_start
params["c"].min = c_start
# do fit, st.write result
result = bmodel.fit(y_values, params, x=x_values)
a = round(result.params['a'].value,5)
b= round(result.params['b'].value,5)
c =round(result.params['c'].value,5)
placeholder1.text(result.fit_report())
with _lock:
#fig1y = plt.figure()
fig1y, ax1 = plt.subplots()
ax2 = ax1.twinx()
# plot results -- note that `best_fit` is already available
ax1.scatter(x_values, y_values, color="#00b3b3", s=2)
#ax1.plot(x_values, result.best_fit, "g")
res = (f"a: {a} / b: {b} / c: {c}")
plt.title(f"{title} / lmfit - {function}\n{formula}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
ax1.plot(t, bmodel.eval(result.params, x=t), "r-")
ax2.plot (t, derivate_of_derivate(t,a,b,c), color = 'purple')
ax2.axhline(linewidth=1, color='purple', alpha=0.5, linestyle="--")
#ax1.plot (t, derivate(t,26660.1, 9.01298, 0.032198), color = 'purple')
#ax2.plot (t, derivate_of_derivate(t,26660.1, 9.01298, 0.032198), color = 'yellow')
#plt.ylim(bottom=0)
#ax1.ylim(0, max_y_values*1.1)
#ax1.set_ylim(510,1200)
#ax2.set_ylim(0,12)
ax1.set_xlabel(f"Days from {from_}")
ax1.set_ylabel(f"{title} - red")
ax2.set_ylabel("delta - purple")
#plt.show()
filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
placeholder.pyplot(fig1y)
if prepare_for_animation == False:
with _lock:
fig1z = plt.figure()
# plot results -- note that `best_fit` is already available
if function == "exponential":
plt.plot(t, derivate(t,a,b,c))
function_x = "derivate"
formula_x = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "derivate":
plt.plot(t, exponential(t, a,b,c))
function_x = "exponential"
formula_x = "a * np.exp(-b * np.exp(-c * x))"
else:
st.error("ERROR")
st.stop()
plt.title(f"{title} / {function_x}\n{formula_x}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
#plt.plot(t, bmodel.eval(result.params, x=t), "r-")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
plt.ylabel(title)
#plt.show()
#filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
#plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1z)
return filename
def fit_the_values_really(x_values, y_values, which_method, title, daterange,i, max_y_values):
x_values_extra = np.linspace(
start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
)
x_values = x_values[:i]
y_values = y_values[:i]
if prepare_for_animation == False:
use_curvefit(x_values, x_values_extra, y_values, title, daterange,i)
return use_lmfit(x_values,y_values, [which_method], title,i, max_y_values)
def fit_the_values(to_do_list , total_days, daterange, which_method, prepare_for_animation):
"""
We are going to fit the values
"""
# Here we go !
st.header("Fitting data to formulas")
infox = (
'<br>Exponential / Standard gompertz function : <i>a * exp(-b * np.exp(-c * x))</i></li>'
'<br>First derivate of the Gompertz function : <i>a * b * c * exp(b * (-1 * exp(-c * x)) - c * x)</i></li>'
'<br>Gaussian : <i>a * exp(-((x - b) ** 2) / c)</i></li>'
'<br>Working on growth model: <i>(a * 0.5 ^ (x / (4 * (math.log(0.5) / math.log(b)))))</i> (b will be the Rt-number)</li>'
)
st.markdown(infox, unsafe_allow_html=True)
global placeholder0, placeholder, placeholder1
placeholder0 = st.empty()
placeholder = st.empty()
placeholder1 = st.empty()
el = st.empty()
for v in to_do_list:
title = v[0]
y_values = v[1]
max_y_values = max(y_values)
# some preparations
number_of_y_values = len(y_values)
global TOTAL_DAYS_IN_GRAPH
TOTAL_DAYS_IN_GRAPH = total_days # number of total days
x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)
if prepare_for_animation == True:
filenames = []
for i in range(5, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
filenames.append(filename)
# build gif
with imageio.get_writer('mygif.gif', mode='I') as writer:
for filename_ in filenames:
image = imageio.imread(f"{filename_}.png")
writer.append_data(image)
webbrowser.open('mygif.gif')
# Remove files
for filename__ in set(filenames):
os.remove(f"{filename__}.png")
else:
for i in range(len(x_values)-1, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
# FIXIT
# aq, bq, cq = find_gaussian_curvefit(x_values, y_values)
# st.write(f"Find Gaussian curvefit - a:{aq} b:{bq} c: {cq}")
def select_period(df, show_from, show_until):
""" _ _ _ """
if show_from is None:
show_from = "2020-2-27"
if show_until is None:
show_until = "2020-4-1"
mask = (df[DATEFIELD].dt.date >= show_from) & (df[DATEFIELD].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
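# Usage sketch (hypothetical dates; assumes df[DATEFIELD] holds datetimes):
# df_march = select_period(df, dt.date(2021, 3, 1), dt.date(2021, 3, 31))
# NB: the normal_c(df) defined below shadows the 3-parameter curve
# normal_c(x, s, mu, h) defined earlier in this script.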
def normal_c(df):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Normal_c")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df['Total_reported_cumm'].values # dependent
yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- Timestamp(firstday)) // Timedelta('1d')) # day-of-year ints
indates = date_range(df.index[0], df.index[-1])
exdates = date_range(nextday, lastday)
ax.scatter(indates, yi, color="#00b3b3", label='Infected')
ax.scatter(indates, yd, color="#00b3b3", label='Dead')
sqrt2 = sqrt(2)
im = Model(normal_c)
st.write (x)
iparams = im.make_params(s=0.3, mu=4.3, h=16.5)
st.write (iparams)
#iparams['s'].min = 0; iparams['h'].min = 0
iresult = im.fit(log(yi+1), iparams, x=x)
st.text('---- Infections:\n' + iresult.fit_report())
ax.plot(indates, exp(iresult.best_fit)-1, 'b', label='Infections fit')
ipred = iresult.eval(x=exrange)
ax.plot(exdates, exp(ipred)-1, 'b--',
label='Forecast: {:,.0f}'.format(exp(ipred[-1])-1))
iupred = iresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
iintlow = clip(ipred-iupred, ipred[0], None)
put(iintlow, range(argmax(iintlow), len(iintlow)), iintlow[argmax(iintlow)])
ax.fill_between(exdates, exp(iintlow), exp(ipred+iupred), alpha=0.35, color='b')
dm = Model(normal_c)
dparams = dm.make_params(s=19.8, mu=79.1, h=11.4) # initial guesses
dparams['s'].min = 0; iparams['h'].min = 0
dresult = dm.fit(log(yd+1), dparams, x=x)
st.text('---- Deaths:\n' + dresult.fit_report())
ax.plot(indates, exp(dresult.best_fit)-1, 'r', label='Deaths fit')
dpred = dresult.eval(x=exrange)
ax.plot(exdates, exp(dpred)-1, 'r--',
label='Forecast: {:,.0f}'.format(exp(dpred[-1])-1))
dupred = dresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
dintlow = clip(dpred-dupred, log(max(yd)+1), None)
put(dintlow, range(argmax(dintlow), len(dintlow)), dintlow[argmax(dintlow)])
ax.fill_between(exdates, exp(dintlow), exp(dpred+dupred), alpha=0.35, color='r')
ax.fill_between(exdates, 0.012 * (exp(iintlow)), 0.012 * (exp(ipred+iupred)),
alpha=0.85, color='g', label='Deaths from observed fatality rate')
ax.set_xlim(df.index[0], lastday)
#ax.set_yscale('log') # semilog
#ax.set_ylim(0, 1500000)
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) # comma separators
ax.grid()
ax.legend(loc="upper left")
ax.xaxis.set_major_formatter(ConciseDateFormatter(AutoDateLocator(), show_offset=False))
ax.set_xlabel('95% prediction confidence intervals shaded')
#fig.savefig('plot.png', bbox_inches='tight')
#print('\nTO VIEW GRAPH: click on plot.png in the file pane to the left.')
#fig.show()
st.pyplot(fig1yz)
st.text('Infections at end of period shown: {:,.0f}. Deaths: {:,.0f}.'.format(
exp(ipred[-1])-1, exp(dpred[-1])-1))
def loglognormal(df, what_to_display):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Log Normal")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df[what_to_display].values # dependent
#yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- Timestamp(firstday)) // Timedelta('1d')) # day-of-year ints
# -*- coding: utf-8 -*-
"""Datareader for cell testers and potentiostats.
This module is used for loading data and databases created by different cell
testers. Currently it only accepts arbin-type res-files (access) data as
raw data files, but we intend to implement more types soon. It also creates
processed files in the hdf5-format.
Example:
>>> d = CellpyData()
>>> d.loadcell(raw_files=["file1.res", "file2.res"]) # loads and merges the runs
>>> voltage_curves = d.get_cap()
>>> d.save("mytest.hdf")
Todo:
* Remove mass dependency in summary data
* use df.loc[row, column], e.g. df.loc[:, "charge_cap"] for a column, or
df.loc[df["step"] == 1, "x"] for a filtered selection
"""
import os
from pathlib import Path
import logging
import sys
import collections
import warnings
import csv
import itertools
import time
from scipy import interpolate
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
from cellpy.parameters import prms
from cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_cellpy_units,
get_headers_normal,
get_headers_step_table,
ATTRS_CELLPYFILE,
)
from cellpy.readers.core import (
FileID,
Cell,
CELLPY_FILE_VERSION,
MINIMUM_CELLPY_FILE_VERSION,
xldate_as_datetime,
)
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# TODO: @jepe - performance warnings - mixed types within cols (pytables)
performance_warning_level = "ignore" # "ignore", "error"
warnings.filterwarnings(
performance_warning_level, category=pd.io.pytables.PerformanceWarning
)
pd.set_option("mode.chained_assignment", None) # "raise", "warn", None
module_logger = logging.getLogger(__name__)
class CellpyData(object):
"""Main class for working and storing data.
This class is the main work-horse for cellpy where all the functions for
reading, selecting, and tweaking your data is located. It also contains the
header definitions, both for the cellpy hdf5 format, and for the various
cell-tester file-formats that can be read. The class can contain
several tests and each test is stored in a list. If you see what I mean...
Attributes:
cells (list): list of DataSet objects.
"""
def __str__(self):
txt = "<CellpyData>\n"
if self.name:
txt += f"name: {self.name}\n"
if self.table_names:
txt += f"table_names: {self.table_names}\n"
if self.tester:
txt += f"tester: {self.tester}\n"
if self.cells:
txt += "datasets: [ ->\n"
for i, d in enumerate(self.cells):
txt += f" ({i})\n"
for t in str(d).split("\n"):
txt += " "
txt += t
txt += "\n"
txt += "\n"
txt += "]"
else:
txt += "datasets: []"
txt += "\n"
return txt
def __bool__(self):
if self.cells:
return True
else:
return False
def __init__(
self,
filenames=None,
selected_scans=None,
profile=False,
filestatuschecker=None, # "modified"
fetch_one_liners=False,
tester=None,
initialize=False,
):
"""CellpyData object
Args:
filenames: list of files to load.
selected_scans:
profile: experimental feature.
filestatuschecker: property to compare cellpy and raw-files;
default read from prms-file.
fetch_one_liners: experimental feature.
tester: instrument used (e.g. "arbin") (checks prms-file as
default).
initialize: create a dummy (empty) dataset; defaults to False.
"""
if tester is None:
self.tester = prms.Instruments.tester
else:
self.tester = tester
self.loader = None # this will be set in the function set_instrument
self.logger = logging.getLogger(__name__)
self.logger.debug("created CellpyData instance")
self.name = None
self.profile = profile
self.minimum_selection = {}
if filestatuschecker is None:
self.filestatuschecker = prms.Reader.filestatuschecker
else:
self.filestatuschecker = filestatuschecker
self.forced_errors = 0
self.summary_exists = False
if not filenames:
self.file_names = []
else:
self.file_names = filenames
if not self._is_listtype(self.file_names):
self.file_names = [self.file_names]
if not selected_scans:
self.selected_scans = []
else:
self.selected_scans = selected_scans
if not self._is_listtype(self.selected_scans):
self.selected_scans = [self.selected_scans]
self.cells = []
self.status_datasets = []
self.selected_cell_number = 0
self.number_of_datasets = 0
self.capacity_modifiers = ["reset"]
self.list_of_step_types = [
"charge",
"discharge",
"cv_charge",
"cv_discharge",
"taper_charge",
"taper_discharge",
"charge_cv",
"discharge_cv",
"ocvrlx_up",
"ocvrlx_down",
"ir",
"rest",
"not_known",
]
# - options
self.force_step_table_creation = prms.Reader.force_step_table_creation
self.force_all = prms.Reader.force_all
self.sep = prms.Reader.sep
self._cycle_mode = prms.Reader.cycle_mode
# self.max_res_filesize = prms.Reader.max_res_filesize
self.load_only_summary = prms.Reader.load_only_summary
self.select_minimal = prms.Reader.select_minimal
# self.chunk_size = prms.Reader.chunk_size # 100000
# self.max_chunks = prms.Reader.max_chunks
# self.last_chunk = prms.Reader.last_chunk
self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
# self.load_until_error = prms.Reader.load_until_error
self.ensure_step_table = prms.Reader.ensure_step_table
self.daniel_number = prms.Reader.daniel_number
# self.raw_datadir = prms.Reader.raw_datadir
self.raw_datadir = prms.Paths.rawdatadir
# self.cellpy_datadir = prms.Reader.cellpy_datadir
self.cellpy_datadir = prms.Paths.cellpydatadir
# search in prm-file for res and hdf5 dirs in loadcell:
self.auto_dirs = prms.Reader.auto_dirs
# - headers and instruments
self.headers_normal = get_headers_normal()
self.headers_summary = get_headers_summary()
self.headers_step_table = get_headers_step_table()
self.table_names = None # dictionary defined in set_instruments
self.set_instrument()
# - units used by cellpy
self.cellpy_units = get_cellpy_units()
if initialize:
self.initialize()
def initialize(self):
self.logger.debug("Initializing...")
self.cells.append(Cell())
@property
def cell(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
cell = self.cells[self.selected_cell_number]
return cell
@property
def dataset(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
warnings.warn("The .dataset property is deprecated, please use .cell instead.")
cell = self.cells[self.selected_cell_number]
return cell
@property
def empty(self):
"""gives False if the CellpyData object is empty (or un-functional)"""
return not self.check()
# TODO: @jepe - merge the _set_xxinstrument methods into one method
def set_instrument(self, instrument=None):
"""Set the instrument (i.e. tell cellpy the file-type you use).
Args:
instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...]
Sets the instrument used for obtaining the data (i.e. sets fileformat)
"""
self.logger.debug(f"Setting instrument: {instrument}")
if instrument is None:
instrument = self.tester
if instrument in ["arbin", "arbin_res"]:
self._set_arbin()
self.tester = "arbin"
elif instrument == "arbin_sql":
self._set_arbin_sql()
self.tester = "arbin"
elif instrument == "arbin_experimental":
self._set_arbin_experimental()
self.tester = "arbin"
elif instrument in ["pec", "pec_csv"]:
self._set_pec()
self.tester = "pec"
elif instrument in ["biologics", "biologics_mpr"]:
self._set_biologic()
self.tester = "biologic"
elif instrument == "custom":
self._set_custom()
self.tester = "custom"
else:
raise Exception(f"option does not exist: '{instrument}'")
def _set_biologic(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import biologics_mpr as instr
self.loader_class = instr.MprLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_pec(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import pec as instr
self.loader_class = instr.PECLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_maccor(self):
warnings.warn("not implemented")
def _set_custom(self):
# use a custom format (csv with information lines on top)
from cellpy.readers.instruments import custom as instr
self.loader_class = instr.CustomLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
logging.debug("setting custom file-type (will be used when loading raw")
self.loader = self.loader_class.loader
def _set_arbin_sql(self):
warnings.warn("not implemented")
def _set_arbin(self):
from cellpy.readers.instruments import arbin as instr
self.loader_class = instr.ArbinLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
# def _set_arbin_experimental(self):
# # Note! All these _set_instrument methods can be generalized to one
# # method. At the moment, I find it
# # more transparent to separate them into respective methods pr
# # instrument.
# from .instruments import arbin_experimental as instr
# self.loader_class = instr.ArbinLoader()
# # get information
# self.raw_units = self.loader_class.get_raw_units()
# self.raw_limits = self.loader_class.get_raw_limits()
# # send information (should improve this later)
# # loader_class.load_only_summary = self.load_only_summary
# # loader_class.select_minimal = self.select_minimal
# # loader_class.max_res_filesize = self.max_res_filesize
# # loader_class.chunk_size = self.chunk_size
# # loader_class.max_chunks = self.max_chunks
# # loader_class.last_chunk = self.last_chunk
# # loader_class.limit_loaded_cycles = self.limit_loaded_cycles
# # loader_class.load_until_error = self.load_until_error
#
# # create loader
# self.loader = self.loader_class.loader
def _create_logger(self):
from cellpy import log
self.logger = logging.getLogger(__name__)
log.setup_logging(default_level="DEBUG")
def set_cycle_mode(self, cycle_mode):
"""set the cycle mode"""
self._cycle_mode = cycle_mode
@property
def cycle_mode(self):
return self._cycle_mode
@cycle_mode.setter
def cycle_mode(self, cycle_mode):
self.logger.debug(f"-> cycle_mode: {cycle_mode}")
self._cycle_mode = cycle_mode
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting the directory to look for res-files in.
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("Directory does not exist")
return
self.raw_datadir = directory
def set_cellpy_datadir(self, directory=None):
"""Set the directory containing .hdf5-files.
Used for setting directory for looking for hdf5-files.
A valid directory name is required.
Args:
directory (str): path to hdf5-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/HDF5"
>>> d.set_cellpy_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info("Directory does not exist")
return
self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile):
"""Check the stats for the files (raw-data and cellpy hdf5).
This function checks if the hdf5 file and the res-files have the same
timestamps etc to find out if we need to bother to load .res -files.
Args:
cellpyfile (str): filename of the cellpy hdf5-file.
rawfiles (list of str): name(s) of raw-data file(s).
Returns:
True if the cellpy hdf5-file is up to date with the raw files,
False if it is missing or the raw files are newer (update needed).
"""
txt = "Checking file ids - using '%s'" % self.filestatuschecker
self.logger.info(txt)
ids_cellpy_file = self._check_cellpy_file(cellpyfile)
self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
if not ids_cellpy_file:
# self.logger.debug("hdf5 file does not exist - needs updating")
return False
ids_raw = self._check_raw(rawfiles)
similar = self._compare_ids(ids_raw, ids_cellpy_file)
if not similar:
# self.logger.debug("hdf5 file needs updating")
return False
else:
# self.logger.debug("hdf5 file is updated")
return True
def _check_raw(self, file_names, abort_on_missing=False):
"""Get the file-ids for the res_files."""
strip_file_names = True
check_on = self.filestatuschecker
if not self._is_listtype(file_names):
file_names = [file_names]
ids = dict()
for f in file_names:
self.logger.debug(f"checking res file {f}")
fid = FileID(f)
# self.logger.debug(fid)
if fid.name is None:
warnings.warn(f"file does not exist: {f}")
if abort_on_missing:
sys.exit(-1)
else:
if strip_file_names:
name = os.path.basename(f)
else:
name = f
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
def _check_cellpy_file(self, filename):
"""Get the file-ids for the cellpy_file."""
strip_filenames = True
check_on = self.filestatuschecker
self.logger.debug("checking cellpy-file")
self.logger.debug(filename)
if not os.path.isfile(filename):
self.logger.debug("cellpy-file does not exist")
return None
try:
store = pd.HDFStore(filename)
except Exception as e:
self.logger.debug(f"could not open cellpy-file ({e})")
return None
try:
fidtable = store.select("CellpyData/fidtable")
except KeyError:
self.logger.warning("no fidtable -" " you should update your hdf5-file")
fidtable = None
finally:
store.close()
if fidtable is not None:
raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)
txt = "contains %i res-files" % (len(raw_data_files))
self.logger.debug(txt)
ids = dict()
for fid in raw_data_files:
full_name = fid.full_name
size = fid.size
mod = fid.last_modified
self.logger.debug(f"fileID information for: {full_name}")
self.logger.debug(f" modified: {mod}")
self.logger.debug(f" size: {size}")
if strip_filenames:
name = os.path.basename(full_name)
else:
name = full_name
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
else:
return None
@staticmethod
def _compare_ids(ids_res, ids_cellpy_file):
similar = True
l_res = len(ids_res)
l_cellpy = len(ids_cellpy_file)
if l_res == l_cellpy and l_cellpy > 0:
for name, value in list(ids_res.items()):
if ids_cellpy_file[name] != value:
similar = False
else:
similar = False
return similar
def loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
only_summary=False,
only_first=False,
force_raw=False,
use_cellpy_stat_file=None,
):
"""Loads data for given cells.
Args:
raw_files (list): name of res-files
cellpy_file (path): name of cellpy-file
mass (float): mass of electrode or active material
summary_on_raw (bool): use raw-file for summary
summary_ir (bool): summarize ir
summary_ocv (bool): summarize ocv steps
summary_end_v (bool): summarize end voltage
only_summary (bool): get only the summary of the runs
only_first (bool): only use the first file fitting search criteria
force_raw (bool): only use raw-files
use_cellpy_stat_file (bool): use stat file if creating summary
from raw
Example:
>>> srnos = my_dbreader.select_batch("testing_new_solvent")
>>> cell_datas = []
>>> for srno in srnos:
>>> ... my_run_name = my_dbreader.get_cell_name(srno)
>>> ... mass = my_dbreader.get_mass(srno)
>>> ... rawfiles, cellpyfiles = \
>>> ... filefinder.search_for_files(my_run_name)
>>> ... cell_data = cellreader.CellpyData()
>>> ... cell_data.loadcell(raw_files=rawfiles,
>>> ... cellpy_file=cellpyfiles)
>>> ... cell_data.set_mass(mass)
>>> ... if not cell_data.summary_exists:
>>> ... cell_data.make_summary() # etc. etc.
>>> ... cell_datas.append(cell_data)
>>>
"""
# This is a part of a dramatic API change. It will not be possible to
# load more than one set of datasets (i.e. one single cellpy-file or
# several raw-files that will be automatically merged)
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None:
similar = False
elif force_raw:
similar = False
else:
similar = self.check_file_ids(raw_files, cellpy_file)
self.logger.debug("checked if the files were similar")
if only_summary:
self.load_only_summary = True
else:
self.load_only_summary = False
if not similar:
self.logger.debug("cellpy file(s) needs updating - loading raw")
self.logger.info("Loading raw-file")
self.logger.debug(raw_files)
self.from_raw(raw_files)
self.logger.debug("loaded files")
# Check if the run was loaded ([] if empty)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
)
else:
self.logger.warning("Empty run!")
else:
self.load(cellpy_file)
if mass:
self.set_mass(mass)
return self
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() also implements loading several
# datasets (using list of lists as input).
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names]
# file_type = self.tester
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError(
"Too many files to merge - "
"could be a p2-p3 zip thing"
)
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logger.debug(
"the first dataset (or only dataset) loaded from the raw data file is empty"
)
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.cells.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_res(self, filenames=None, check_file_type=True):
"""Convenience function for loading arbin-type data into the
datastructure.
Args:
filenames: ((lists of) list of raw-file names): uses
cellpy.file_names if None.
If list-of-list, it loads each list into separate datasets.
The files in the inner list will be merged.
check_file_type (bool): check file type if True
(res-, or cellpy-format)
"""
raise DeprecatedFeature
def _validate_datasets(self, level=0):
self.logger.debug("validating test")
level = 0
# simple validation for finding empty datasets - should be expanded to
# find not-complete datasets, datasets with missing prms etc
v = []
if level == 0:
for test in self.cells:
# check that it contains all the necessary headers
# (and add missing ones)
# test = self._clean_up_normal_table(test)
# check that the test is not empty
v.append(self._is_not_empty_dataset(test))
self.logger.debug(f"validation array: {v}")
return v
def check(self):
"""Returns False if no datasets exists or if one or more of the datasets
are empty"""
if len(self.status_datasets) == 0:
return False
if all(self.status_datasets):
return True
return False
def _is_not_empty_dataset(self, dataset):
if dataset is self._empty_dataset():
return False
else:
return True
def _clean_up_normal_table(self, test=None, dataset_number=None):
# check that test contains all the necessary headers
# (and add missing ones)
raise NotImplementedError
def _report_empty_dataset(self):
self.logger.info("Empty set")
@staticmethod
def _empty_dataset():
return None
def _invent_a_name(self, filename=None, override=False):
if filename is None:
self.name = "nameless"
return
if self.name and not override:
return
path = Path(filename)
self.name = path.with_suffix("").name
def load(self, cellpy_file, parent_level=None, return_cls=True):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level.
return_cls (bool): Return the class.
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._load_hdf5(cellpy_file, parent_level)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def _load_hdf5(self, filename, parent_level=None):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData")
Returns:
loaded datasets (DataSet-object)
"""
# TODO: option for reading version and relabelling dfsummary etc
# if the version is older
data = None
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
"Using non-default parent label for the "
"hdf-store: {}".format(parent_level)
)
if CELLPY_FILE_VERSION > 4:
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
meta_dir = "/info" # hard-coded
fid_dir = prms._cellpyfile_fid
else:
raw_dir = "/raw"
step_dir = "/step_table"
summary_dir = "/dfsummary"
meta_dir = "/info"
fid_dir = "/fidtable"
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version < CELLPY_FILE_VERSION:
if data.cellpy_file_version < 5:
self.logger.debug(f"version: {data.cellpy_file_version}")
_raw_dir = "/dfdata"
_step_dir = "/step_table"
_summary_dir = "/dfsummary"
_fid_dir = "/fidtable"
self._check_keys_in_cellpy_file(
meta_dir, parent_level, _raw_dir, store, _summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, _summary_dir
)
self._extract_raw_from_cellpy_file(
data, parent_level, _raw_dir, store
)
self._extract_steps_from_cellpy_file(
data, parent_level, _step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
_fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
warnings.warn(
"Loaded old cellpy-file version (<5). "
"Please update and save again."
)
else:
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(
data, parent_level, step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
data.raw_data_files, data.raw_data_files_length = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
def _create_initial_data_set_from_cellpy_file(self, meta_dir, parent_level, store):
# Remark that this function is run before selecting loading method
# based on version. If you change the meta_dir prm to something else than
# "/info" it will most likely fail.
data = Cell()
meta_table = None
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError as e:
self.logger.info("This file is VERY old - no info given here")
self.logger.info("You should convert the files to a newer version!")
self.logger.debug(e)
try:
data.cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
data.cellpy_file_version = 0
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.debug(f"cellpy file version. {data.cellpy_file_version}")
return data, meta_table
def _check_keys_in_cellpy_file(
self, meta_dir, parent_level, raw_dir, store, summary_dir
):
required_keys = [raw_dir, summary_dir, meta_dir]
required_keys = ["/" + parent_level + _ for _ in required_keys]
for key in required_keys:
if key not in store.keys():
self.logger.info(
f"This cellpy-file is not good enough - "
f"at least one key is missing: {key}"
)
raise Exception(
f"OH MY GOD! At least one crucial key" f"is missing {key}!"
)
self.logger.debug(f"Keys in current cellpy-file: {store.keys()}")
def _extract_raw_from_cellpy_file(self, data, parent_level, raw_dir, store):
data.raw = store.select(parent_level + raw_dir)
def _extract_summary_from_cellpy_file(self, data, parent_level, store, summary_dir):
data.summary = store.select(parent_level + summary_dir)
def _extract_fids_from_cellpy_file(self, fid_dir, parent_level, store):
try:
fid_table = store.select(
parent_level + fid_dir
) # remark! changed spelling from
# lower letter to camel-case!
fid_table_selected = True
except Exception as e:
self.logger.debug(e)
self.logger.debug("could not get fid from cellpy-file")
fid_table = []
warnings.warn("no fid_table - you should update your cellpy-file")
fid_table_selected = False
return fid_table, fid_table_selected
def _extract_steps_from_cellpy_file(self, data, parent_level, step_dir, store):
try:
data.steps = store.select(parent_level + step_dir)
except Exception as e:
self.logging.debug("could not get steps from cellpy-file")
data.steps = pd.DataFrame()
warnings.warn(f"Unhandled exception raised: {e}")
def _extract_meta_from_cellpy_file(self, data, meta_table, filename):
# get attributes from meta table
for attribute in ATTRS_CELLPYFILE:
value = self._extract_from_dict(meta_table, attribute)
# some fixes due to errors propagated into the cellpy-files
if attribute == "creator":
if not isinstance(value, str):
value = "no_name"
if attribute == "test_no":
if not isinstance(value, (int, float)):
value = 0
setattr(data, attribute, value)
if data.mass is None:
data.mass = 1.0
else:
data.mass_given = True
data.loaded_from = str(filename)
# hack to allow the renaming of tests to datasets
try:
name = self._extract_from_dict_hard(meta_table, "name")
if not isinstance(name, str):
name = "no_name"
data.name = name
except KeyError:
self.logger.debug(f"missing key in meta table: name")
print(meta_table)
warnings.warn("OLD-TYPE: Recommend to save in new format!")
try:
name = self._extract_from_dict(meta_table, "test_name")
except Exception as e:
name = "no_name"
self.logger.debug("name set to 'no_name")
warnings.warn(f"Unhandled exception raised: {e}")
data.name = name
# unpacking the raw data limits
for key in data.raw_limits:
try:
data.raw_limits[key] = self._extract_from_dict_hard(meta_table, key)
except KeyError:
self.logger.debug(f"missing key in meta_table: {key}")
warnings.warn("OLD-TYPE: Recommend to save in new format!")
@staticmethod
def _extract_from_dict(t, x, default_value=None):
try:
value = t[x].values
if value:
value = value[0]
except KeyError:
value = default_value
return value
@staticmethod
def _extract_from_dict_hard(t, x):
value = t[x].values
if value:
value = value[0]
return value
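# Both helpers pull a scalar out of the single-row meta table, e.g.
# _extract_from_dict(meta_table, "mass", default_value=1.0) returns the stored
# value or 1.0 if the column is missing ("mass" is only an illustrative key).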
def _create_infotable(self, dataset_number=None):
# needed for saving class/DataSet to hdf5
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
test = self.get_cell(dataset_number)
infotable = collections.OrderedDict()
for attribute in ATTRS_CELLPYFILE:
value = getattr(test, attribute)
infotable[attribute] = [value]
infotable["cellpy_file_version"] = [CELLPY_FILE_VERSION]
limits = test.raw_limits
for key in limits:
infotable[key] = limits[key]
infotable = pd.DataFrame(infotable)
self.logger.debug("_create_infotable: fid")
fidtable = collections.OrderedDict()
fidtable["raw_data_name"] = []
fidtable["raw_data_full_name"] = []
fidtable["raw_data_size"] = []
fidtable["raw_data_last_modified"] = []
fidtable["raw_data_last_accessed"] = []
fidtable["raw_data_last_info_changed"] = []
fidtable["raw_data_location"] = []
fidtable["raw_data_files_length"] = []
fids = test.raw_data_files
fidtable["raw_data_fid"] = fids
if fids:
for fid, length in zip(fids, test.raw_data_files_length):
fidtable["raw_data_name"].append(fid.name)
fidtable["raw_data_full_name"].append(fid.full_name)
fidtable["raw_data_size"].append(fid.size)
fidtable["raw_data_last_modified"].append(fid.last_modified)
fidtable["raw_data_last_accessed"].append(fid.last_accessed)
fidtable["raw_data_last_info_changed"].append(fid.last_info_changed)
fidtable["raw_data_location"].append(fid.location)
fidtable["raw_data_files_length"].append(length)
else:
warnings.warn("seems you lost info about your raw-data")
fidtable = pd.DataFrame(fidtable)
return infotable, fidtable
def _convert2fid_list(self, tbl):
self.logger.debug("converting loaded fidtable to FileID object")
fids = []
lengths = []
counter = 0
for item in tbl["raw_data_name"]:
fid = FileID()
fid.name = item
fid.full_name = tbl["raw_data_full_name"][counter]
fid.size = tbl["raw_data_size"][counter]
fid.last_modified = tbl["raw_data_last_modified"][counter]
fid.last_accessed = tbl["raw_data_last_accessed"][counter]
fid.last_info_changed = tbl["raw_data_last_info_changed"][counter]
fid.location = tbl["raw_data_location"][counter]
length = tbl["raw_data_files_length"][counter]
counter += 1
fids.append(fid)
lengths.append(length)
if counter < 1:
self.logger.debug("info about raw files missing")
return fids, lengths
def merge(self, datasets=None, separate_datasets=False):
"""This function merges datasets into one set."""
self.logger.info("Merging")
if separate_datasets:
warnings.warn(
"The option seperate_datasets=True is"
"not implemented yet. Performing merging, but"
"neglecting the option."
)
else:
if datasets is None:
datasets = list(range(len(self.cells)))
first = True
for dataset_number in datasets:
if first:
dataset = self.cells[dataset_number]
first = False
else:
dataset = self._append(dataset, self.cells[dataset_number])
for raw_data_file, file_size in zip(
self.cells[dataset_number].raw_data_files,
self.cells[dataset_number].raw_data_files_length,
):
dataset.raw_data_files.append(raw_data_file)
dataset.raw_data_files_length.append(file_size)
self.cells = [dataset]
self.number_of_datasets = 1
return self
def _append(self, t1, t2, merge_summary=True, merge_step_table=True):
self.logger.debug(
f"merging two datasets (merge summary = {merge_summary}) "
f"(merge step table = {merge_step_table})"
)
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
# finding diff of time
start_time_1 = t1.start_datetime
start_time_2 = t2.start_datetime
diff_time = xldate_as_datetime(start_time_2) - xldate_as_datetime(start_time_1)
diff_time = diff_time.total_seconds()
if diff_time < 0:
self.logger.warning("Wow! your new dataset is older than the old!")
self.logger.debug(f"diff time: {diff_time}")
sort_key = self.headers_normal.datetime_txt # DateTime
# mod data points for set 2
data_point_header = self.headers_normal.data_point_txt
try:
last_data_point = max(t1.raw[data_point_header])
except ValueError:
last_data_point = 0
t2.raw[data_point_header] = t2.raw[data_point_header] + last_data_point
# mod cycle index for set 2
cycle_index_header = self.headers_normal.cycle_index_txt
try:
last_cycle = max(t1.raw[cycle_index_header])
except ValueError:
last_cycle = 0
t2.raw[cycle_index_header] = t2.raw[cycle_index_header] + last_cycle
# mod test time for set 2
test_time_header = self.headers_normal.test_time_txt
t2.raw[test_time_header] = t2.raw[test_time_header] + diff_time
# merging
if not t1.raw.empty:
raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
# checking if we already have made a summary file of these datasets
# (to be used if merging summaries (but not properly implemented yet))
if t1.summary_made and t2.summary_made:
dfsummary_made = True
else:
dfsummary_made = False
# checking if we already have made step tables for these datasets
if t1.steps_made and t2.steps_made:
step_table_made = True
else:
step_table_made = False
if merge_summary:
# check if (self-made) summary exists.
self_made_summary = True
try:
test_it = t1.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
try:
test_it = t2.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
if self_made_summary:
# mod cycle index for set 2
last_cycle = max(t1.summary[cycle_index_header])
t2.summary[cycle_index_header] = (
t2.summary[cycle_index_header] + last_cycle
)
# mod test time for set 2
t2.summary[test_time_header] = (
t2.summary[test_time_header] + diff_time
)
# to-do: mod all the cumsum stuff in the summary (best to make
# summary after merging)
else:
t2.summary[data_point_header] = (
t2.summary[data_point_header] + last_data_point
)
summary2 = pd.concat([t1.summary, t2.summary], ignore_index=True)
"""
# @Description:
Calculate the Expected Information Gain (EIG) for continuous face finding tasks.
"""
import os
import math
import pickle
from tqdm import tqdm
import argparse
from collections import defaultdict
import pandas as pd
import torch
import pyro
import mlflow
import mlflow.pytorch
from experiment_tools.pyro_tools import auto_seed
from experiment_tools.output_utils import get_mlflow_meta
from contrastive.mi import PriorContrastiveEstimation, NestedMonteCarloEstimation
from neural.modules import LazyFn
from face_finding_train_continuous_recurrent import HiddenObjects
def make_data_source(experiment_id, run_id, T, device="cuda", n=1):
fname = f"mlruns/{experiment_id}/{run_id}/artifacts/hostories/results_vi.pickle"
with open(fname, "rb") as f:
data = pickle.load(f)
sample = defaultdict(list)
latent_name = "theta"
for history in data["loop"]:
sample[latent_name].append(history["theta"])
for i in range(T):
sample[f"y{i+1}"].append(history[f"y{i+1}"])
sample[f"xi{i+1}"].append(history[f"xi{i+1}"])
if len(sample[latent_name]) == n:
record = {k: torch.stack(v, 0).to(device) for k, v in sample.items()}
yield record
sample = defaultdict(list)
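# Usage sketch (hypothetical experiment/run ids): the generator yields batches
# of n rollouts, each a dict of tensors stacked along dim 0.
# for batch in make_data_source("1", "0123abcd", T=10, device="cpu", n=4):
#     theta = batch["theta"]  # shape (4, ...)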
def get_data_source_meta(experiment_id, run_id):
meta = get_mlflow_meta(experiment_id=experiment_id)
meta = [m for m in meta if run_id == m.info.run_id][0]
fname = f"mlruns/{experiment_id}/{run_id}/artifacts/hostories/results_vi.pickle"
with open(fname, "rb") as f:
data = pickle.load(f)
out = {
"n_rollout": len(data["loop"]),
"noise_scale": float(meta.data.params["noise_scale"]),
"p": int(meta.data.params["p"]),
"K": int(meta.data.params["num_sources"]),
"num_experiments": int(meta.data.params["num_experiments"]),
}
return out
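# `out` bundles the run's logged hyper-parameters, e.g. (made-up values):
# {"n_rollout": 256, "noise_scale": 0.5, "p": 2, "K": 2, "num_experiments": 10}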
def evaluate_run(
experiment_id,
run_id,
num_experiments_to_perform,
num_inner_samples,
device,
n_rollout,
from_source=False,
seed=-1,
theta_prior_loc=None,
theta_prior_covmat=None,
):
pyro.clear_param_store()
model_location = f"mlruns/{experiment_id}/{run_id}/artifacts/model"
seed = auto_seed(seed)
factor = 16
n_rollout = n_rollout // factor
EIGs_mean = pd.DataFrame(columns=["lower", "upper"])
"""
##############################################################################
#
# Calculate motif's activity differences Zscores
#
# AUTHOR: Maciej_Bak
# AFFILIATION: University_of_Basel
# AFFILIATION: Swiss_Institute_of_Bioinformatics
# CONTACT: <EMAIL>
# CREATED: 20-01-2020
# LICENSE: Apache_2.0
#
##############################################################################
"""
# imports
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
import pandas as pd
import numpy as np
def parse_arguments():
"""Parser of the command-line arguments."""
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
default="ERROR",
help="Verbosity/Log level. Defaults to ERROR",
)
parser.add_argument(
"-l", "--logfile", dest="logfile", help="Store log to this file."
)
parser.add_argument(
"--activity-table",
dest="activity_table",
required=True,
help="Path to the table with motifs activities and their stds.",
)
parser.add_argument(
"--design-table",
dest="design_table",
required=True,
help="Path to the design table.",
)
parser.add_argument(
"--outfile",
dest="outfile",
required=True,
help="Path for the output table with Z-scores.",
)
return parser
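# Typical invocation (hypothetical script/file names):
#   python calculate_zscores.py --activity-table activities.tsv \
#       --design-table design.tsv --outfile zscores.tsv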
##############################################################################
def calculate_Zscores(A_b, design_table):
"""
Calculate Zscores as the ratio: Act.Diff / Act.Diff.Std.
"""
cols_list = []
for s in design_table.index.values:
cols_list.append("A_" + s)
for s in design_table.index.values:
cols_list.append("stdA_" + s)
Zscores_df = pd.DataFrame(index=A_b.index.values)
#!/usr/bin/env python3
import os, argparse
from tuba_seq.fastq import singleMismatcher
from tuba_seq.shared import smart_open, logPrint
from collections import defaultdict
import pandas as pd
parser = argparse.ArgumentParser(description="Split paired-end read files by Illumina indecies.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("forward_read_file", help='FASTQ file of forward reads')
parser.add_argument("reverse_read_file", help='FASTQ file of reverse reads')
parser.add_argument('barcode_file', type=str, help='Tab-delimited file with sample_name, barcode pairs.')
parser.add_argument("--forward_read_dir", default='forward_reads', help='Directory to put split forward reads.')
parser.add_argument("--reverse_read_dir", default='reverse_reads', help='Directory to put split reverse reads.')
parser.add_argument('--compression', default='gz', choices=['bz2', 'gz', 'lzma', 'none'], help='Compression algorithm for output.')
###############################################################################
args = parser.parse_args()
Log = logPrint(args)
samples = pd.read_csv(args.barcode_file, sep='\t', names=['Samples', 'Index'], index_col=1)
#!/usr/bin/env python
"""
Pandas-based Table Data Handler
https://github.com/dceoy/pdbio
"""
import bz2
import gzip
import logging
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from itertools import product
import pandas as pd
class BaseBioDataFrame(object, metaclass=ABCMeta):
"""Base DataFrame handler for Table Files."""
def __init__(self, path=None, format_name='TSV', delimiter='\t',
column_header=True, chrom_column=None, pos_columns=None,
txt_file_exts=None, bin_file_exts=None, load=True):
for a in [pos_columns, txt_file_exts, bin_file_exts]:
assert type(a) is not str
self.__logger = logging.getLogger(__name__)
self.__format_name = format_name
self.__column_header = column_header
self.__delimiter = delimiter
self.__chrom_column = chrom_column
self.__pos_columns = pos_columns
self.__file_exts = [
*(
[e + c for e, c in product(txt_file_exts, ['', '.gz', '.bz2'])]
if txt_file_exts else list()
),
*(bin_file_exts or list())
]
self.path = path
self.header = list()
self.df = pd.DataFrame()
if path and load:
self.load(path=path)
def load(self, path):
self._update_path(path=path)
self.__logger.info(
'Load {0} file: {1}'.format(self.__format_name, self.path)
)
self.load_table()
self.__logger.debug('self.df shape: {}'.format(self.df.shape))
return self
def _update_path(self, path):
abspath = self.normalize_path(path=path)
if not os.path.isfile(abspath):
raise FileNotFoundError('file not found: {}'.format(abspath))
elif (self.__file_exts and
not [x for x in self.__file_exts if abspath.endswith(x)]):
raise ValueError('invalid file extension: {}'.format(abspath))
elif self.path != abspath:
self.__logger.debug('abspath: {}'.format(abspath))
self.path = abspath
@staticmethod
def normalize_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@abstractmethod
def load_table(self):
self.df = pd.read_csv(
self.path, header=self.__column_header, sep=self.__delimiter
)
return self
def convert_lines_to_df(self, lines, update_header=True):
if update_header:
self.header = list()
line_dfs = [
d for d in [self.parse_line(string=s) for s in lines]
if isinstance(d, pd.DataFrame)
]
if line_dfs:
return pd.concat(line_dfs, ignore_index=True, sort=False)
import os
import gc
import math
import psutil
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool, RLock
class CompileReactions:
reactions = {"transcription_": "None",
"splicing_": "mol_premrna_",
"translation_": "mol_mrna_",
"premrna_decay_": "mol_premrna_",
"mrna_decay_": "mol_mrna_",
"protein_decay_": "mol_protein_",
"phosphorylation_": "mol_protein_",
"dephosphorylation_": "mol_phospho_protein_",
"phospho_protein_decay_": "mol_phospho_protein_"}
def compiling(self):
with Pool(processes=self.ncpus, initargs=(RLock(),), initializer=tqdm.set_lock) as pool:
jobs = [pool.apply_async(self.compile_reactions, args=(batch_i,)) for batch_i in range(self.nbatches)]
pool.close()
results = [job.get() for job in jobs]
def compile_reactions(self, device_i):
file = f'batch_{device_i}.parquet'
print (f"Simulation: {self.network_name} Starting to Processing Batch {device_i}...", flush=True)
noise_info, noise_network = self.create_duplicates(device_i, file)
species_vec = self.create_species_vector(noise_info, file)
propensity = self.create_propensity_matrix(noise_info, species_vec, file)
self.create_affinity_matrix(noise_network, propensity, species_vec, file)
self.create_change_vector(noise_info, propensity, species_vec, file)
print (f"Simulation: {self.network_name} Current Memory % Usage: {psutil.virtual_memory()[2]} Finished Processing Batch {device_i}...", flush=True)
del propensity, species_vec
def create_duplicates(self, device_i, file):
noise_info = pd.concat([self.feature_info.copy()] * self.nsims_per_device).reset_index()
noise_network = pd.concat([self.feature_network.copy()] * self.nsims_per_device).reset_index()
# setting up species names for downstream processes and calculating simuluation number
noise_info['sim_i'] = (noise_info.index.values // self.feature_info.shape[0] + 1) + (device_i * self.nsims_per_device)
noise_network['sim_i'] = (noise_network.index.values // self.feature_network.shape[0] + 1) + (device_i * self.nsims_per_device)
noise_network['to'] = noise_network['to'] + '_' + noise_network['sim_i'].astype(str)
noise_network['from'] = noise_network['from'] + '_' + noise_network['sim_i'].astype(str)
noise_info['feature_id'] = noise_info['feature_id'] + '_' + noise_info['sim_i'].astype(str)
noise_info = self.get_perturbation(noise_info, file)
noise_info = self.inject_rate_noise(noise_info)
noise_info, regulators = self.get_reactions_regulators(noise_network, noise_info)
noise_network = self.inject_interaction_noise(noise_network, noise_info)
kin_bool = noise_network.kinase_edge
eff_bool = noise_network.effect == 1
grn = noise_network.loc[~kin_bool, ].copy()
phospho = noise_network.loc[(kin_bool) & (eff_bool), ].copy()
dephospho = noise_network.loc[(kin_bool) & (~eff_bool), ].copy()
phospho_tfs = noise_info.loc[(noise_info.is_tf) & (noise_info.is_phosphorylated), 'feature_id'].values
grn['to'] = 'transcription_' + grn['to']
phospho['to'] = 'phosphorylation_' + phospho['to']
dephospho['to'] = 'dephosphorylation_' + dephospho['to']
grn_bool = grn['from'].isin(phospho_tfs)
phospho['from'] = 'mol_phospho_protein_' + phospho['from']
dephospho['from'] = 'mol_phospho_protein_' + dephospho['from']
grn.loc[~grn_bool, 'from'] = 'mol_protein_' + grn.loc[~grn_bool, 'from']
grn.loc[grn_bool, 'from'] = 'mol_phospho_protein_' + grn.loc[grn_bool, 'from']
noise_network = pd.concat([grn, phospho, dephospho])
noise_info.drop(columns=['index'], inplace=True)
noise_network.drop(columns=['index'], inplace=True)
noise_info = noise_info.reset_index(drop=True)
noise_network = noise_network.reset_index(drop=True)
regulators.to_parquet(os.path.join(self.regulators_dir, file), compression='brotli')
noise_info = self.manage_dtypes(noise_info)
noise_network = self.manage_dtypes(noise_network)
return noise_info, noise_network
def create_species_vector(self, feature_info, file):
molecules = ['premrna', 'mrna', 'protein']
mrna = list('mol_mrna_' + feature_info.feature_id.values)
premrna = list('mol_premrna_' + feature_info.feature_id.values)
protein = list('mol_protein_' + feature_info.feature_id.values)
phospho_protein = list('mol_phospho_protein_' + feature_info.loc[feature_info.is_phosphorylated, 'feature_id'].values)
species = premrna + mrna + protein + phospho_protein
species_state = pd.DataFrame({'species': species, 'state': [0] * len(species)})
species_state = self.manage_dtypes(species_state)
species_state['gene'] = species_state['species'].apply(lambda x: '_'.join(x.split('_')[-3:-1]))
species_state['sim_i'] = species_state['species'].apply(lambda x: [int(char) for char in x.split('_') if char.isdigit()][-1])
species_state['molecule_type'] = species_state['species'].apply(lambda x: [char for char in x.split('_') if char in molecules][0])
species_state['filepath'] = os.path.join(self.species_vec_dir, file)
species_state['spec_name'] = species_state['species'].apply(lambda x: '_'.join(x.split("_")[:-1]))
species_state.to_parquet(os.path.join(self.species_vec_dir, file), compression='brotli')
species_state = species_state[['species']]
return species_state
def create_propensity_matrix(self, feature_info, species_vec, file):
# need to add reversible reaction for dephosphorylation
propensity_dfs = []
phosphos = feature_info.loc[feature_info.is_phosphorylated, ]
for reaction in self.reactions.keys():
col = reaction + 'rate'
basal = 0
rev_reaction = 0
perturbation = 1
independence = 0
effects_sums = 0
reaction_type = 0
base_activity = 0
if col == 'premrna_decay_rate':
col = 'mrna_decay_rate'
if reaction == 'transcription_':
reaction_type = 1
species_needed = 'None'
basal = feature_info.basal.values
reaction_rates = feature_info[col].values
nregulators = feature_info.nregulators.values
effects_sums = feature_info.effects_sums.values
independence = feature_info.independence.values
perturbation = feature_info.perturbation.values
base_activity = feature_info.base_activity.values
reacts = reaction + feature_info.feature_id.values
elif 'phospho' in col:
if 'protein_decay' in col:
col = 'protein_decay_rate'
if 'dephospho' in col:
rev_reaction = 1
reaction_rates = phosphos[col].values
basal = phosphos.basal.values
independence = phosphos.independence.values
nregulators = phosphos.nregulators.values
effects_sums = phosphos.kinase_effects_sums.values
base_activity = phosphos.kinase_base_activity.values
species_needed = self.reactions[reaction] + phosphos.feature_id.values
reacts = reaction + phosphos.feature_id.values
else:
reaction_rates = feature_info[col].values
species_needed = self.reactions[reaction] + feature_info.feature_id.values
reacts = reaction + feature_info.feature_id.values
reaction_set = pd.DataFrame({'reaction': reacts, 'reaction_rate': reaction_rates})
reaction_set['basal'] = basal
reaction_set['effects_sums'] = 0
reaction_set['base_activity'] = 0.0
reaction_set['species'] = species_needed
reaction_set['nregulators'] = nregulators
reaction_set['independence'] = independence
reaction_set['effects_sums'] = effects_sums
reaction_set['perturbation'] = perturbation
reaction_set['base_activity'] = base_activity
reaction_set['reaction_type'] = reaction_type
reaction_set['reversible_reaction'] = rev_reaction
propensity_dfs.append(reaction_set)
propensity = pd.concat(propensity_dfs)
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = pd.Series([0.0162, 0.0162, 0.0162], dtype='float')
result = pd.Series([], dtype='float')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
try:
# for i in range(0,3):
# result[i] = screenip_empty.fw_bird()
screenip_empty.no_of_runs = len(expected_results)
screenip_empty.fw_bird()
result = screenip_empty.out_fw_bird
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fw_mamm(self):
"""
unittest for function screenip.fw_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.172, 0.172, 0.172], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.fw_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_bird(self):
"""
unittest for function screenip.dose_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000000., 4805.50175, 849727.21122], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_bird * self.solubility)/(self.bodyweight_assessed_bird / 1000.)
screenip_empty.out_fw_bird = pd.Series([10., 0.329, 1.8349], dtype='float')
screenip_empty.solubility = pd.Series([100., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([1.0, 2.395, 0.98], dtype='float')
result = screenip_empty.dose_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_mamm(self):
"""
unittest for function screenip.dose_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([8000000., 48205.7595, 3808036.37889], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_mamm * self.solubility)/(self.bodyweight_assessed_mammal / 1000)
screenip_empty.out_fw_mamm = pd.Series([20., 12.843, 6.998], dtype='float')
screenip_empty.solubility = pd.Series([400., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([1., 9.32, 0.834], dtype='float')
result = screenip_empty.dose_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_bird(self):
"""
unittest for function screenip.at_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000., 687.9231, 109.3361], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_avian_water) * ((self.bodyweight_assessed_bird / self.bodyweight_tested_bird)**(self.mineau_scaling_factor - 1.))
screenip_empty.ld50_avian_water = pd.Series([2000., 938.34, 345.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([100., 39.49, 183.54], dtype='float')
screenip_empty.ld50_bodyweight_tested_bird = pd.Series([200., 73.473, 395.485], dtype='float')
screenip_empty.mineau_scaling_factor = pd.Series([2., 1.5, 2.5], dtype='float')
result = screenip_empty.at_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_mamm(self):
"""
unittest for function screenip.at_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([11.89207, 214.0572, 412.6864], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.ld50_mammal_water = pd.Series([10., 250., 500.], dtype='float')
screenip_empty.ld50_bodyweight_tested_mammal = pd.Series([200., 39.49, 183.54], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([100., 73.473, 395.485], dtype='float')
result = screenip_empty.at_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fi_bird(self):
"""
unittest for function screenip.fi_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.012999, 0.026578, 0.020412], dtype='float')
result = pd.Series([], dtype='float')
try:
#0.0582 * ((bw_grams / 1000.)**0.651)
bw_grams = pd.Series([100., 300., 200.], dtype='float')
result = screenip_empty.fi_bird(bw_grams)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_act(self):
"""
unittest for function screenip.test_act:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10.5737, 124.8032, 416.4873], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.noael_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.noael_mammal_water = pd.Series([10., 120., 400.], dtype='float')
screenip_empty.noael_bodyweight_tested_mammal = pd.Series([500., 385.45, 673.854], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([400., 329.45, 573.322], dtype='float')
result = screenip_empty.act()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_det(self):
"""
unittest for function screenip.det
return:
"""
#
# '''
# Dose Equiv. Toxicity:
#
# The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
# the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
#
# Dose Equiv. Toxicity = (NOAEC * FI) / BW
#
# NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
# and for any other test species. The model calculates the dose equivalent toxicity values for
# all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
# equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
# '''
# try:
# # result =
# # self.assertEquals(result, )
# pass
# finally:
# pass
# return
#
#
# def test_det_duck(self):
# """
# unittest for function screenip.det_duck:
# :return:
# """
# try:
# # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)
# screenip_empty.noaec_duck = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_duck()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_quail(self):
# """
# unittest for function screenip.det_quail:
# :return:
# """
# try:
# # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. / 1000.)
# screenip_empty.noaec_quail = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_quail()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_other_1(self):
# """
# unittest for function screenip.det_other_1:
# :return:
# """
# try:
# #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet
# screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams
# result = screenip_empty.det_other_1()
# npt.assert_array_almost_equal(result, 4666, 4)
# finally:
# pass
# return
#
# The following tests are configured such that:
# 1. four values are provided for each needed input
# 2. the four input values generate four values of out_det_* per bird type
# 3. the inputs per bird type are set so that calculations of out_det_* will result in
# each bird type having one minimum among the bird types;
# thus all four calculations result in one minimum per bird type
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.bodyweight_bobwhite_quail = 178.
screenip_empty.bodyweight_mallard_duck = 1580.
screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')
screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')
screenip_empty.noaec_bird_other_1 = pd.Series([50., 200., 300., 250.], dtype='float')
screenip_empty.noaec_bird_other_2 = pd.Series([350., 400., 250., 100.], dtype='float')
screenip_empty.noaec_bodyweight_bird_other_1 = pd.Series([345.34, 453.54, 649.29, 294.56], dtype='float')
screenip_empty.noaec_bodyweight_bird_other_2 = pd.Series([123.84, 85.743, 127.884, 176.34], dtype='float')
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.det()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_acute_bird(self):
"""
unittest for function screenip.acute_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10., 5.22093, 0.479639], dtype='float')
result = pd.Series([], dtype='float')
try:
# self.out_acute_bird = self.out_dose_bird / self.out_at_bird
screenip_empty.out_dose_bird = pd.Series([100., 121.23, 43.994], dtype='float')
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import aif360.algorithms.postprocessing
import aif360.datasets
import aif360.metrics
import numpy as np
import pandas as pd
import sklearn.metrics
import sklearn.model_selection
import lale.datasets.data_schemas
import lale.datasets.openml
import lale.lib.lale
import lale.operators
import lale.type_checking
from lale.datasets.data_schemas import add_schema_adjusting_n_rows
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def dataset_to_pandas(dataset, return_only="Xy"):
"""
Return pandas representation of the AIF360 dataset.
Parameters
----------
dataset : aif360.datasets.BinaryLabelDataset
AIF360 dataset to convert to a pandas representation.
return_only : 'Xy', 'X', or 'y'
Which part of features X or labels y to convert and return.
Returns
-------
result : tuple
- item 0: pandas Dataframe or None, features X
- item 1: pandas Series or None, labels y
"""
if "X" in return_only:
X = pd.DataFrame(dataset.features, columns=dataset.feature_names)
result_X = lale.datasets.data_schemas.add_schema(X)
assert isinstance(result_X, pd.DataFrame), type(result_X)
else:
result_X = None
if "y" in return_only:
y = pd.Series(dataset.labels.ravel(), name=dataset.label_names[0])
result_y = lale.datasets.data_schemas.add_schema(y)
assert isinstance(result_y, pd.Series), type(result_y)
else:
result_y = None
return result_X, result_y
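# Hedged usage sketch: the three-row frame below is synthetic and the
# BinaryLabelDataset call follows the usual AIF360 constructor pattern
# (favorable/unfavorable labels plus label and protected-attribute names),
# not anything specific to this module.
_toy_df = pd.DataFrame(
    {"age": [25.0, 40.0, 33.0], "sex": [0.0, 1.0, 1.0], "label": [0.0, 1.0, 1.0]}
)
_toy_ds = aif360.datasets.BinaryLabelDataset(
    favorable_label=1.0,
    unfavorable_label=0.0,
    df=_toy_df,
    label_names=["label"],
    protected_attribute_names=["sex"],
)
_toy_X, _toy_y = dataset_to_pandas(_toy_ds)                   # features and labels
_toy_X_only, _ = dataset_to_pandas(_toy_ds, return_only="X")  # labels come back as None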
_dataset_fairness_properties: lale.type_checking.JSON_TYPE = {
"favorable_label": {
"description": 'Label value which is considered favorable (i.e. "positive").',
"type": "number",
},
"unfavorable_label": {
"description": 'Label value which is considered unfavorable (i.e. "negative").',
"type": "number",
},
"protected_attribute_names": {
"description": "Subset of feature names for which fairness is desired.",
"type": "array",
"items": {"type": "string"},
},
"unprivileged_groups": {
"description": "Representation for unprivileged group.",
"type": "array",
"items": {
"description": "Map from feature names to group-indicating values.",
"type": "object",
"additionalProperties": {"type": "number"},
},
},
"privileged_groups": {
"description": "Representation for privileged group.",
"type": "array",
"items": {
"description": "Map from feature names to group-indicating values.",
"type": "object",
"additionalProperties": {"type": "number"},
},
},
}
_categorical_fairness_properties: lale.type_checking.JSON_TYPE = {
"favorable_labels": {
"description": 'Label values which are considered favorable (i.e. "positive").',
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Literal value.", "type": "string"},
{"description": "Numerical value.", "type": "number"},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
]
},
},
"protected_attributes": {
"description": "Features for which fairness is desired.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"required": ["feature", "privileged_groups"],
"properties": {
"feature": {
"description": "Column name or column index.",
"anyOf": [{"type": "string"}, {"type": "integer"}],
},
"privileged_groups": {
"description": "Values or ranges that indicate being a member of the privileged group.",
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Literal value.", "type": "string"},
{"description": "Numerical value.", "type": "number"},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
]
},
},
},
},
},
}
_categorical_fairness_schema = {
"type": "object",
"properties": _categorical_fairness_properties,
}
_dataset_fairness_schema = {
"type": "object",
"properties": _dataset_fairness_properties,
}
def dataset_fairness_info(dataset):
"""
Inspect the AIF360 dataset and return its fairness metadata as JSON.
Parameters
----------
dataset : aif360.datasets.BinaryLabelDataset
Returns
-------
result : dict
JSON data structure with fairness information.
- favorable_label : number
Label value which is considered favorable (i.e. "positive").
- unfavorable_label : number
Label value which is considered unfavorable (i.e. "negative").
- protected_attribute_names : array **of** items : string
Subset of feature names for which fairness is desired.
- unprivileged_groups : array
Representation for unprivileged group.
- items : dict
Map from feature names to group-indicating values.
- privileged_groups : array
Representation for privileged group.
- items : dict
Map from feature names to group-indicating values.
"""
def attributes_to_groups(names, value_arrays):
result = [{}]
for i in range(len(names)):
next_result = []
for d in result:
for next_v in value_arrays[i]:
next_d = {**d, names[i]: next_v}
next_result.append(next_d)
result = next_result
return result
unprivileged_groups = attributes_to_groups(
dataset.protected_attribute_names, dataset.unprivileged_protected_attributes
)
privileged_groups = attributes_to_groups(
dataset.protected_attribute_names, dataset.privileged_protected_attributes
)
result = {
"favorable_label": dataset.favorable_label,
"unfavorable_label": dataset.unfavorable_label,
"protected_attribute_names": dataset.protected_attribute_names,
"unprivileged_groups": unprivileged_groups,
"privileged_groups": privileged_groups,
}
lale.type_checking.validate_schema(result, _dataset_fairness_schema)
return result
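# Small standalone restatement of the nested attributes_to_groups helper with
# made-up values: two protected attributes expand into the cartesian product
# of their group-indicating values, one dict per combination.
_names = ["sex", "race"]
_value_arrays = [[0.0], [1.0, 2.0]]
_groups = [{}]
for _i in range(len(_names)):
    _groups = [{**_d, _names[_i]: _v} for _d in _groups for _v in _value_arrays[_i]]
# _groups == [{"sex": 0.0, "race": 1.0}, {"sex": 0.0, "race": 2.0}]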
class _PandasToDatasetConverter:
def __init__(self, favorable_label, unfavorable_label, protected_attribute_names):
lale.type_checking.validate_schema(
favorable_label, _dataset_fairness_properties["favorable_label"]
)
self.favorable_label = favorable_label
lale.type_checking.validate_schema(
unfavorable_label, _dataset_fairness_properties["unfavorable_label"]
)
self.unfavorable_label = unfavorable_label
lale.type_checking.validate_schema(
protected_attribute_names,
_dataset_fairness_properties["protected_attribute_names"],
)
self.protected_attribute_names = protected_attribute_names
def convert(self, X, y, probas=None):
assert isinstance(X, pd.DataFrame), type(X)
assert isinstance(y, pd.Series), type(y)
assert X.shape[0] == y.shape[0], f"X.shape {X.shape}, y.shape {y.shape}"
assert not X.isna().any().any(), f"X\n{X}\n"
assert not y.isna().any().any(), f"y\n{y}\n"
y_reindexed = pd.Series(data=y.values, index=X.index, name=y.name)
df = pd.concat([X, y_reindexed], axis=1)
# -*- coding: utf-8 -*-
"""
workflow.py
<NAME> (<EMAIL>)
===============================================================
A script for interfacing with input and output files via
a workflow based approach. Handles progress saving by only
incorporating file checks to see if a particular process
has already been run, and skipping the processing step.
===============================================================
"""
"""
===============================================================
Modules
===============================================================
"""
import base64
from io import BytesIO
from math import sqrt
from modules import helpers
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from statsmodels.robust.scale import mad
from treeinterpreter import treeinterpreter as ti
import geopandas as gpd
import logging
import numpy as np
import os
import pandas as pd
import pickle
"""
===============================================================
Variables
===============================================================
"""
_report_template = """
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Report: {{ title }}</title>
<meta name="description" content="Random forest summary metrics for {{ title }}">
<style>
h1{
font-size: 300%;
text-align: center;
padding: 30px;
border-bottom: 5px double black;
}
h2{
font-size: 200%;
text-align: center;
padding: 10px;
padding-top: 50px;
border-bottom: 1px solid black;
}
p {
text-align: center;
}
table{
border: 0;
text-align: left;
font-size: 120%;
padding: 10px;
margin-left:auto;
margin-right:auto;
}
th{
border-bottom: 1px solid black;
border-collapse: collapse;
padding: 10px;
}
td{
padding: 5px;
padding-left: 10px;
}
img{
height: 100vh;
display: block;
margin-left: auto;
margin-right: auto;
padding: 30px;
}
</style>
</head>
<body>
<h1>Summary Report: {{ exp_title }}</h1>
<h2>Geospatial Semantic Features</h2><br>
{{ cd_plot }}
<h2>Multicollinearity Reduction</h2><br>
{{ ocorr_table }}
{{ ocorr_plot }}
<h2>Parameter Optimization</h2><br>
{{ grid_table }}
<h2>Cross Validation Performance</h2><br>
{{ cv_plot }}
<h2>Class Probabilities</h2><br>
{{ prob_plot }}
<h2>Feature Importance</h2><br>
{{ imp_plot }}
<h2>Outliers</h2><br>
{{ outlier_plot }}
</body>
</html>
"""
"""
===============================================================
Configuration Functions
===============================================================
"""
def _global_config(config, settings=None, avail=None):
"""
_global_config: obj -> obj
---------------------------------------------------------------
Sets the local [config] to the global [settings] if a global
setting exists. If all local [config] has been set, this function
returns the original [config] or if no [settings] and [avail] settings
exist.
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None OR obj
The global settings to use if it exists, otherwise
use the defaults.
* avail: None OR (listof str)
The available [config] keys to set. Only these keys
will be set to global defaults if they exist.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
if settings is not None and avail is not None:
for k in settings.keys():
if k not in config and k in avail:
config[k] = settings[k]
return config
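# Hedged illustration with throwaway dicts: keys already present locally win,
# and only missing keys listed in avail are filled from the global settings.
_local_cfg = {'plot_dpi': 150}
_global_cfg = {'plot_dpi': 300, 'plot_ext': '.png', 'cores': -1}
_filled_cfg = _global_config(_local_cfg, settings=_global_cfg, avail=['plot_dpi', 'plot_ext'])
# _filled_cfg == {'plot_dpi': 150, 'plot_ext': '.png'}  ('cores' is not in avail)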
def analysis_config(config, settings=None):
"""
analysis_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom analysis configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings, ['cross_validation_tests', 'high_correlations', 'outlier_value', 'persist'])
config['cross_validation_tests'] = [2, 5, 10] if 'cross_validation_tests' not in config else config['cross_validation_tests']
config['high_correlations'] = [-0.7, 0.7] if 'high_correlations' not in config else config['high_correlations']
config['outlier_value'] = 10 if 'outlier_value' not in config else config['outlier_value']
config['persist'] = True if 'persist' not in config else config['persist']
return config
def forest_config(config, n_jobs=[-1], settings=None):
"""
forest_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom forest configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings, RandomForestClassifier._get_param_names())
config['n_estimators'] = [10, 64, 96, 128] if 'n_estimators' not in config else config['n_estimators']
config['criterion'] = ['entropy'] if 'criterion' not in config else config['criterion']
config['oob_score'] = [True] if 'oob_score' not in config else [True]
config['class_weight'] = ['balanced'] if 'class_weight' not in config else config['class_weight']
config['n_jobs'] = [n_jobs] if 'n_jobs' not in config else config['n_jobs']
return config
def experiment_config(config):
"""
experiment_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment info configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config['title'] = '' if 'title' not in config else config['title']
config['filter'] = [] if 'filter' not in config else config['filter']
config['id'] = [] if 'id' not in config else config['id']
config['keep_columns'] = [] if 'keep_columns' not in config else config['keep_columns']
config['epsg'] = '4326' if 'epsg' not in config else config['epsg']
config['units'] = 'units' if 'units' not in config else config['units']
return config
def plot_config(config, settings=None):
"""
plot_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment plot configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings)
config['plot_style'] = 'whitegrid' if 'plot_style' not in config else config['plot_style']
config['plot_color'] = 'gray' if 'plot_color' not in config else config['plot_color']
config['plot_dpi'] = 300 if 'plot_dpi' not in config else config['plot_dpi']
config['plot_ext'] = '.png' if 'plot_ext' not in config else config['plot_ext']
return config
def settings_config(config):
"""
settings_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom settings configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
# (Settings) Configure the global settings
settings = config['settings']
config['settings']['cores'] = -1 if 'cores' not in settings else settings['cores']
# (Plots) Configure global plot settings
config['settings']['plot'] = {} if 'plot' not in settings else settings['plot']
config['settings']['plot'] = plot_config(config['settings']['plot'])
# (Analysis) Configure global analysis settings
config['settings']['analysis'] = {} if 'analysis' not in settings else settings['analysis']
config['settings']['analysis'] = analysis_config(config['settings']['analysis'])
# (Forest) Configure global forest settings
config['settings']['forest'] = {} if 'forest' not in settings else settings['forest']
config['settings']['forest'] = forest_config(config['settings']['forest'], n_jobs=config['settings']['cores'])
logging.info('Checked configuration file with defaults set when applicable')
return config
"""
===============================================================
Functions
===============================================================
"""
def gen_contrib(pkl, rf, outliers, variables, suspect_value=10, outlier_col='outlier_measure', cls_col='class', persist=True):
"""
gen_contrib: str obj pd.DataFrame pd.DataFrame float str str bool -> pd.DataFrame
---------------------------------------------------------------
Generates the contributions for each outlier given a [suspect] value.
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* rf: obj
The sklearn random forest model that has been previously trained.
* outliers: pd.DataFrame
The outlier measures obtained from the [rf] model from sklearn.
It consists of two columns: [outlier_col] and [cls_col].
* variables: pd.DataFrame
The variables used to train the [rf] model from sklearn.
* suspect_value: float
The cutoff range to suspect an outlier. Any outlier measure
greater than this value is considered an outlier.
* outlier_col: str
The outlier measure column name of [outliers].
* cls_col: str
The class column name of [outliers].
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* df: pd.DataFrame
The result dataframe with the classes, and the variable
contributions for each outlier.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
# (Suspects) Obtain suspecting outliers
suspects = pd.concat([outliers, variables], axis=1)
suspects = suspects[suspects[outlier_col] > suspect_value]
svariables = suspects.drop([outlier_col, cls_col], axis=1) # variables of outliers
# (Feat_Contrib) Obtain variable contributions to assigned class
fc = ti.predict(rf, svariables.values)[2]
contrib = []
for c, cls in zip(fc, outliers[cls_col]):
idx = np.where(rf.classes_ == cls)
fci = [ft[idx][0] for ft in c]
contrib.append(fci)
# (Contrib_DF) Build informative contribution dataframe
df = pd.DataFrame(contrib)
df.columns = svariables.columns
df.index = svariables.index
df = pd.concat([suspects[cls_col], df], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(df, f, protocol=4)
logging.info('Pickled outlier variable contributions ' + pkl)
else:
with open(pkl, 'rb') as f:
df = pickle.load(f)
logging.info('Pickled outlier variable contributions already exists, skipping ' + pkl)
return df
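# Synthetic shape check for the treeinterpreter indexing used in gen_contrib:
# the third element returned by ti.predict has shape (n_samples, n_features,
# n_classes), and each suspect row keeps only the column of its assigned
# class. The data, labels and the class picked below are all made up.
_Xt = np.random.RandomState(0).rand(8, 3)
_yt = np.array(["a", "a", "a", "a", "b", "b", "b", "b"])
_rft = RandomForestClassifier(n_estimators=10, random_state=0).fit(_Xt, _yt)
_fc_toy = ti.predict(_rft, _Xt)[2]                          # shape (8, 3, 2)
_cls_idx = np.where(_rft.classes_ == "b")
_row0_contrib_b = [_ft[_cls_idx][0] for _ft in _fc_toy[0]]  # row 0, class "b"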
def gen_csv(out, df, persist=True, *args, **kwargs):
"""
gen_csv: str obj bool *args **kwargs -> None
---------------------------------------------------------------
Generates a csv file from a pandas [df] object. Skips
the generation if the csv file already exists.
Required Parameters
-------------------
* out: str
The path to store the csv file with extension
* df: obj
A pandas dataframe to save
* *args: *args
Arguments to be passed to to_csv from pandas
* **kwargs: **kwargs
Keyword arguments to be passed to to_csv from pandas
Optional Parameters
-------------------
* persist: bool
Whether to regenerate a pickle file or not.
---------------------------------------------------------------
"""
if not os.path.isfile(out):
df.to_csv(out, *args, **kwargs)
logging.info('Table saved at ' + out)
else:
logging.info('Table already exists, skipping ' + out)
def gen_f1_scores(pkl, obj, variables, targets, cv_files, cvs, persist=True, n_jobs=-1):
"""
gen_f1_scores: str obj pd.DataFrame pd.Series (listof str) (listof int) bool int -> pd.DataFrame
---------------------------------------------------------------
Generates the f1 scores for each cross validation test
specified by [cvs].
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The variables used to train the [obj] model from sklearn.
* targets: pd.DataFrame
The true target classes used to train the [obj] model from sklearn.
* cv_files: (listof str)
The cv files to save each cross_val_score object from
sklearn.
* cvs: (listof int)
The cross validation folds for each test in list form.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
* n_jobs: int
Number of cores to use for parallel processing.
Returns
-------
* cv_scores: pd.DataFrame
The result dataframe with a column for the folds
used for each cross validation and the respective
mean f1 scores.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
scores = []
for cv_pkl, cv in zip(cv_files, cvs):
f1_scores = gen_pkl(cv_pkl, _func=cross_val_score, _persist=persist, estimator=obj, X=variables.values, y=targets.values, cv=cv, scoring='f1_weighted', n_jobs=n_jobs)
scores.append(f1_scores.mean())
cvs = pd.Series(cvs, name='cv_folds')
scores = pd.Series(scores, name='mean_f1_score')
cv_scores = pd.concat([cvs, scores], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(cv_scores, f, protocol=4)
logging.info('Pickled F1 scores of cross validation tests ' + pkl)
else:
with open(pkl, 'rb') as f:
cv_scores = pickle.load(f)
logging.info('Pickled F1 scores of cross validation tests already exists, skipping ' + pkl)
return cv_scores
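# Tiny, self-contained illustration of the scoring call wrapped above: random
# toy data, two fold counts, and the mean weighted F1 collected into the same
# two-column layout as cv_scores. The values are meaningless beyond the mechanics.
_Xc = np.random.RandomState(1).rand(12, 4)
_yc = np.array(["a", "b"] * 6)
_clf_toy = RandomForestClassifier(n_estimators=10, random_state=1)
_f1_cv2 = cross_val_score(_clf_toy, _Xc, _yc, cv=2, scoring="f1_weighted").mean()
_f1_cv3 = cross_val_score(_clf_toy, _Xc, _yc, cv=3, scoring="f1_weighted").mean()
_cv_toy = pd.concat([pd.Series([2, 3], name="cv_folds"),
                     pd.Series([_f1_cv2, _f1_cv3], name="mean_f1_score")], axis=1)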
def gen_gdc(data_files, target, epsg, pkl, cols=[], persist=True):
"""
gen_gdc: (listof str) str str bool -> pd.DataFrame
---------------------------------------------------------------
Reads the list of files containing geodata and combines
them into one dataframe, before pickling them into a file
at [pkl]. The data will also be projected to [epsg] and
is assumed to all have the same coordinate reference system.
Geometric variables such as geom_type, length, area (units^2), vertices,
repx, and repy will also be included. Only the target variable
will be included from the data files for classification.
Required Parameters
-------------------
* data_files: (listof str)
The geodata files to be read by geopandas via fiona.
See http://www.gdal.org/ogr_formats.html
* target: str
The classification col in [gdc] with class data
* epsg: str
The coordinate reference system number in epsg to project the data to.
http://geopandas.org/user.html#GeoSeries.to_crs
* pkl: str
The pickle file path the save the read geodata
Optional Parameters
-------------------
* cols: (listof str)
The list of column names to keep.
* col_index: str OR None
The unique id column to use as the index.
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* gd: pd.DataFrame
The combined data from [data_files] projected
to [epsg]
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
gdc = helpers.concat_gdf(data_files, epsg=epsg)
crs = gdc.crs
variables = helpers.get_series(gdc, series_cols=cols + ['geom_type', 'length', 'area'])
variables['area'] = variables['area'].apply(sqrt)
vtx = helpers.get_vtx(gdc)
pts = gdc.representative_point()
pts = pd.DataFrame([[p.x, p.y] for p in pts], columns=['repx', 'repy'])
gdc = pd.concat([gdc[target], pts, variables, vtx, gdc.geometry], axis=1)
gdc = gpd.GeoDataFrame(gdc)
gdc.crs = crs
with open(pkl, 'wb') as f:
pickle.dump(gdc, f, protocol=4)
logging.info('Pickled GeoDataFrame file ' + pkl)
else:
with open(pkl, 'rb') as f:
gdc = pickle.load(f)
logging.info('GeoDataFrame file exists, skipping pickle for ' + pkl)
return gdc
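# Synthetic two-feature sketch of the representative-point variables gathered
# in gen_gdc; no CRS, target column or vertex counts are attached to this toy
# frame, and shapely is pulled in only for the illustration.
from shapely.geometry import Point
_toy_gdf = gpd.GeoDataFrame({"ftype": ["bench", "tree"]},
                            geometry=[Point(0.0, 0.0), Point(1.0, 2.0)])
_toy_pts = _toy_gdf.representative_point()
_toy_xy = pd.DataFrame([[p.x, p.y] for p in _toy_pts], columns=["repx", "repy"])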
def gen_gdcn(gdc,
gdn,
target,
pkl,
gdn_ipattern='near_',
corr_pkl=None,
corr_range=(-0.8, 0.8),
ignr_corr=None,
scaler=None,
ignr_scale=None,
persist=True):
"""
gen_gdcn: gpd.GeoDataFrame
gpd.GeoDataFrame
str
str
str OR None
str OR None
(tupleof num)
(listof str) OR None
obj
(listof str) OR None
bool
-> pd.DataFrame
---------------------------------------------------------------
Combines the relationship data [gdn] with [gdc]. Also performs
preprocessing of multicollinearity reduction, removal of 0 variance
variables, and scaling depending on [corr_pkl]..[scaler] arguments.
Required Parameters
-------------------
* gdc: gpd.GeoDataFrame
The geodataframe with the geometric variables and the original
data used to generate [gdn]
* gdn: gpd.GeoDataFrame
The geodataframe with the nearest distance to each
[target] class of [gdc] for each row of [gdc]
* target: str
The group col in [gdc] representing the classification groups
* pkl: str
The pickle file path the save the combined variables data
Optional Parameters
-------------------
* gdn_ipattern: str OR None
If not None, set this to the alias for the [gdn] variables
pattern in which each column corresponds to a unique class in the
[target] column with an added alias in front of its name.
E.g. If gdn_ipattern = 'near_' and a class from target is 'bus_stop',
the corresponding target class col from [gdn] would be 'near_bus_stop'
Once set, this will order the [gdn] columns in descending order
of [target] class counts - thus the class with the most counts are
first and the the class with the least counts are last. This is
useful for the ordered reduction of multicollinearity included
with this function.
* corr_pkl: str OR None
If not None, reduces multicollinearity in the data by only
limiting to variables that are not correlated to each
other. This considers variables to keep in order starting
with variables from the [gdc] then variables from the [gdn].
Specify a path to pickle the details of the correlation
removal in order to apply it.
* corr_range: (tupleof num)
If [corr_pkl] is not None, specify the negative (1st item)
and positive (2nd item) correlation thresholds to considering
multicollinearity.
* ignr_corr: (listof str) OR None
If [corr_pkl] is not None, specify the columns to ignore
when checking for high correlation removal
* scaler: obj OR None
If not None, uses a sklearn scaler object to scale the
non-categorical variables.
* ignr_scale: (listof str) OR None
If [scaler] is not None, specify the columns to ignore
when scaling variables.
* persist: bool
Whether to regenerate a pickle file or not.
Returns
-------
* gdcn: pd.DataFrame
The [gdc] data with the added relationship variables
modified with preprocessing from [corr_pkl]..[scaler]
adjustments if applicable.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
gdn = gdn[gdn_ipattern + gdc[target].value_counts().index] if gdn_ipattern is not None else gdn # order by freq of [target]
gdcn = pd.concat([gdc, gdn], axis=1)
# (Variance) Remove zero variance variables
var = gdcn.var()
gdcn = gdcn.drop(var[var == 0].index, axis=1)
# (Multicollinearity) Remove colinear variables in order
if corr_pkl is not None:
if ignr_corr is None:
ocorr = helpers.ocorr_df(gdcn.drop(target, axis=1), corr_range[0], corr_range[1])
vkeep = ocorr.keep.tolist() + [target]
else:
corr_cols = [c for c in gdcn.columns if c not in ignr_corr and c != target]
corr_chk = gdcn.drop(target, axis=1)[corr_cols]
ocorr = helpers.ocorr_df(corr_chk, corr_range[0], corr_range[1])
vkeep = ocorr.keep.tolist() + [target] + ignr_corr
gdcn = gdcn[vkeep] # keep non-correlated variables
with open(corr_pkl, 'wb') as f:
pickle.dump(ocorr, f, protocol=4)
logging.info('Pickled dictionary of removed correlated variables at ' + corr_pkl)
# (Scale) Use a scaler to transform variables
if scaler is not None:
if ignr_scale is None:
scale_cols = gdcn.columns
else:
scale_cols = [c for c in gdcn.columns if c not in ignr_scale]
gdcn[scale_cols] = scaler.fit_transform(gdcn[scale_cols].values)
# (Save) Pickle the [complete] data
with open(pkl, 'wb') as f:
pickle.dump(gdcn, f, protocol=4)
logging.info('Calculated and pickled combined geodata file ' + pkl)
else:
with open(pkl, 'rb') as f:
gdcn = pickle.load(f)
logging.info('Combined geodata already calculated, skipping pickle for ' + pkl)
return gdcn
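# Plain-pandas sketch of the two reductions described in the gen_gdcn
# docstring (zero-variance removal, then ordered removal of highly correlated
# columns). The project's own helpers.ocorr_df is not used here; the +/-0.8
# cut below simply mirrors the default corr_range.
_toy_vars = pd.DataFrame({
    "a": [1.0, 2.0, 3.0, 4.0],
    "b": [2.0, 4.0, 6.0, 8.0],      # perfectly correlated with "a"
    "c": [5.0, 5.0, 5.0, 5.0],      # zero variance
    "d": [1.0, 0.0, 1.0, 0.0],
})
_toy_var = _toy_vars.var()
_toy_vars = _toy_vars.drop(_toy_var[_toy_var == 0].index, axis=1)   # drops "c"
_toy_corr = _toy_vars.corr()
_to_drop = [c for _i, c in enumerate(_toy_corr.columns)
            if any(abs(_toy_corr.iloc[_j, _i]) > 0.8 for _j in range(_i))]
_toy_reduced = _toy_vars.drop(_to_drop, axis=1)                     # keeps "a" and "d"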
def gen_html_plot(_fig, *args, **kwargs):
"""
gen_html_plot: obj -> str
---------------------------------------------------------------
Converts a matplotlib figure [obj] to bytes for use in
data uri of html templates. Original code modified from
[1].
References
----------
* [1] http://stackoverflow.com/questions/31492525/converting-matplotlib-png-to-base64-for-viewing-in-html-template
Required Parameters
-------------------
* _fig: obj
A matplotlib figure obj.
Optional Parameters
-------------------
* *args: *args
Arguments to be passed to [fig].savefig
* **kwargs: **kwargs
Keyword arguments to be passed to [fig].savefig
Returns
-------
* html_plot: str
The string representation of image data from [fig]
to be embedded as a data uri in an html template.
---------------------------------------------------------------
"""
fig_io = BytesIO()
_fig.savefig(fig_io, *args, **kwargs)
fig_io.seek(0)
data_uri = base64.b64encode(fig_io.getvalue()).decode('utf8')
html_plot = '<img src="data:image/png;base64,' + data_uri + '"\>'
return html_plot
def gen_imp(pkl, obj, variable_cols, persist=True):
"""
gen_imp: str obj (listof str) bool -> pd.DataFrame
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
variable importances.
Required Parameters
-------------------
* pkl: str
The pickle file to store the variable importances.
* obj: obj
The sklearn model that has been previously trained.
* variable_cols: pd.DataFrame
The names of the variables used to train the [obj] model
from sklearn in order.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* rf_imp: pd.DataFrame
The variable importance dataframe.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
imp = pd.DataFrame(obj.feature_importances_, columns=['importance'], index=variable_cols)
imp['variable'] = imp.index.values
imp = imp.sort_values(by='importance', ascending=False)
with open(pkl, 'wb') as f:
pickle.dump(imp, f, protocol=4)
logging.info('Pickled random forest variable importances ' + pkl)
else:
with open(pkl, 'rb') as f:
imp = pickle.load(f)
logging.info('Pickled random forest variable importances already exists, skipping ' + pkl)
return imp
def gen_mprob(pkl, prob, cls_col='predict', prob_col='max_prob', persist=True):
"""
gen_mprob: str pd.DataFrame str str bool -> pd.DataFrame
---------------------------------------------------------------
Obtains the mean probability for each class given the generated
probabilities.
Required Parameters
-------------------
* pkl: str
The pickle file to store the mean class probabilities.
* prob: pd.DataFrame
The probabilities to calculate the mean class probabilities
from. There must be a class column named [target] and
a probability column named [prob_col].
Optional Parameters
-------------------
* cls_col: str
The class column name from [prob].
* prob_col: str
The probability column name from [prob].
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* mprob: pd.DataFrame
The dataframe with information on the mean probabilities
for each class sorted from largest to smallest probability.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
mprob = pd.DataFrame(prob.groupby(cls_col)[prob_col].mean())
mprob[cls_col] = mprob.index.values
mprob = mprob.sort_values(by=prob_col, ascending=False)
with open(pkl, 'wb') as f:
pickle.dump(mprob, f, protocol=4)
logging.info('Pickled mean class probabilities ' + pkl)
else:
with open(pkl, 'rb') as f:
mprob = pickle.load(f)
logging.info('Pickled mean class probabilities already exists, skipping ' + pkl)
return mprob
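# Toy groupby illustration of the mean class probability table produced by
# gen_mprob; the class names and probabilities below are invented.
_toy_prob = pd.DataFrame({
    "predict": ["road", "road", "building", "building", "building"],
    "max_prob": [0.90, 0.70, 0.95, 0.85, 0.60],
})
_toy_mprob = pd.DataFrame(_toy_prob.groupby("predict")["max_prob"].mean())
_toy_mprob["predict"] = _toy_mprob.index.values
_toy_mprob = _toy_mprob.sort_values(by="max_prob", ascending=False)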
def gen_outliers(pkl, prox_files, target_cls, persist=True):
"""
gen_outliers: str (listof str) pd.Series bool -> pd.DataFrame
---------------------------------------------------------------
Obtains the class outlier measures for each instance of data
using proximities as described by [1].
References
----------
* [1] Breiman, Leo: https://www.stat.berkeley.edu/~breiman/Using_random_forests_v4.0.pdf
Required Parameters
-------------------
* pkl: str
The pickle file to store the mean proximities.
* prox_files: (listof str)
The joblib pickle files with the stored proximities.
Each file represents a proximity matrix for a class
in the same order as [target_cls].
* target_cls: pd.Series
The series of classes to generate the mean proximities on.
Each class must have a corresponding [prox_files] in order.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* outliers: pd.DataFrame
The dataframe of outlier measures and the true classes
for each instance of data.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
iprox = [] # within-class mean prox of each instance
icls = [] # classes of each instance
idx = [] # original instance indices
for prox_pkl, cls in zip(prox_files, target_cls):
prox_df = joblib.load(prox_pkl)
prox = prox_df.values
np.fill_diagonal(prox, np.nan) # set matching instances to nan
out_n = len(prox) / np.nansum(prox**2, axis=0) # outlier measure of instances n
iout = (out_n - np.median(out_n)) / mad(out_n, center=np.median) # normalized outlier measure
iprox = iprox + list(iout)
icls = icls + [cls] * len(prox)
idx = idx + list(prox_df.index.values)
iprox = pd.Series(iprox, name='outlier_measure')
icls = pd.Series(icls, name='class')
outliers = pd.concat([iprox, icls], axis=1)
outliers.index = idx
with open(pkl, 'wb') as f:
pickle.dump(outliers, f, protocol=4)
logging.info('Pickled outlier measures ' + pkl)
else:
with open(pkl, 'rb') as f:
outliers = pickle.load(f)
logging.info('Pickled outlier measures already exists, skipping ' + pkl)
return outliers
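# Hedged usage sketch for gen_outliers (commented out); the proximity pickle
# files and class labels are illustrative placeholders and are assumed to have
# been produced beforehand (e.g. by per-class gen_prox runs).
# prox_files = ['prox_classA.pkl', 'prox_classB.pkl']
# outliers = gen_outliers('outliers.pkl', prox_files, pd.Series(['classA', 'classB']))
# print(outliers.sort_values('outlier_measure', ascending=False).head())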
def gen_pkl(_pkl, _func, _lib='pickle', _persist=True, *args, **kwargs):
"""
    gen_pkl: str function str bool *args **kwargs -> any
---------------------------------------------------------------
    Generates a pickled file from data returned from [_func] after
passing [*args] and/or [**kwargs].
Required Parameters
-------------------
* _pkl: str
The path to store the pickled file
* _func: function
A function that returns data to be pickled
Optional Parameters
-------------------
    * _lib: str
        The name of the library used to dump and load the pickled
        data returned from [_func]. Currently
        supported inputs are 'pickle' and 'joblib'.
    * _persist: bool
        Whether to persist the result to a pickle file; if False,
        the data is regenerated on every call and nothing is written.
* *args: *args
Arguments to be passed to [_func]
* **kwargs: **kwargs
Keyword arguments to be passed to [_func]
Returns
-------
* data: any
The return value from [_func] after passing
        [*args] and [**kwargs].
---------------------------------------------------------------
"""
if _lib not in ['pickle', 'joblib']:
raise(Exception('Error: ' + _lib + ' is not a supported object for load and dump.'))
if not os.path.isfile(_pkl) or not _persist:
data = _func(*args, **kwargs)
if _lib == 'pickle' and _persist:
with open(_pkl, 'wb') as f:
pickle.dump(data, f, protocol=4)
elif _lib == 'joblib' and _persist:
joblib.dump(data, _pkl)
logging.info('Pickled data from ' + _func.__name__ + ' for ' + _pkl)
else:
if _lib == 'pickle':
with open(_pkl, 'rb') as f:
data = pickle.load(f)
elif _lib == 'joblib':
data = joblib.load(_pkl)
logging.info('Pickled data from ' + _func.__name__ + ' already exists, skipping ' + _pkl)
return data
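# Hedged usage sketch for gen_pkl (commented out); expensive_summary and
# some_dataframe are illustrative placeholders for any callable and its inputs.
# def expensive_summary(df):
#     return df.describe()
# summary = gen_pkl('summary.pkl', expensive_summary, _lib='pickle', df=some_dataframe)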
def gen_prob(pkl, obj, variables, persist=True):
"""
gen_prob: str obj pd.DataFrame bool -> pd.DataFrame
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
probabilities for each class, the maximum probability of the
predicted class, and the predicted class information given
the attributes of predict_proba and classes_, and method
of predict.
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The variables used to train the [obj] model from sklearn.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* prob: pd.DataFrame
        The probability dataframe with information on the
        probabilities for each class, the maximum probability
        for the predicted class, and the predicted class
        itself, in that order.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
pred = pd.Series(obj.predict(variables.values), name='predict')
cls_prob = pd.DataFrame(obj.predict_proba(variables.values), columns=obj.classes_)
max_prob = pd.Series(cls_prob.apply(max, axis=1).values, name='max_prob')
prob = pd.concat([cls_prob, max_prob, pred], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(prob, f, protocol=4)
logging.info('Pickled random forest probabilities ' + pkl)
else:
with open(pkl, 'rb') as f:
prob = pickle.load(f)
logging.info('Pickled random forest probabilities already exists, skipping ' + pkl)
return prob
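# Hedged usage sketch for gen_prob (commented out); rf_model is assumed to be a
# fitted sklearn classifier and train_variables its training dataframe, both
# illustrative placeholders.
# prob = gen_prob('prob.pkl', rf_model, train_variables)
# print(prob[['max_prob', 'predict']].head())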
def gen_prox(pkl, obj, variables, persist=True):
"""
    gen_prox: str obj pd.DataFrame bool -> None
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
proximities for each [variables] and saves it to a [pkl].
This function is designed for parallel processing and reduction
of memory for large datasets, and thus does not return data. To
retrieve the results, load the data from the file at [pkl] using
    pickle.load.
Required Parameters
-------------------
* pkl: str
        The pickle file to store the proximities. This is created
        using pickle.
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The training variables matching the [obj] model from sklearn.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
prox = 1. - helpers.rf_prox(obj, variables.values)
prox = pd.DataFrame(prox, index=variables.index)
with open(pkl, 'wb') as f:
pickle.dump(prox, f, protocol=4)
logging.info('Pickled random forest proximities ' + pkl)
else:
logging.info('Pickled random forest proximities already exists, skipping ' + pkl)
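# Hedged usage sketch for gen_prox (commented out); since the function only
# writes to disk, the matrix is read back explicitly. rf_model and
# train_variables are illustrative placeholders.
# gen_prox('prox.pkl', rf_model, train_variables)
# with open('prox.pkl', 'rb') as f:
#     prox = pickle.load(f)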
def gen_rfg(rfg_files, grid, variables, targets, persist=True):
"""
gen_rfg: (listof str) obj pd.DataFrame pd.Series bool -> pd.DataFrame
---------------------------------------------------------------
Trains a random forest classifier for each [grid] parameter combination and
returns a dataframe that summarizes its oob score, fit,
and the path to the stored pickle files.
Required Parameters
-------------------
    * rfg_files: (listof str)
The list of pickle files to save each random forest
* grid: obj
The parameter grid to generate random forests combinations on.
* variables: pd.DataFrame
The variables to use for training the random forest classifier
* targets: pd.Series
The prediction targets to use for training the random forest
classifier
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* : pd.DataFrame
        The summary dataframe consisting of the grid parameter values
        for each combination, the out-of-bag error, the score (fit)
        of the random forest model, and the pickle file
        that the random forest model is stored in.
---------------------------------------------------------------
"""
rfg_oob = []
grid_names = list(list(grid)[0].keys())
for pkl, g in zip(rfg_files, grid):
if not os.path.isfile(pkl) or not persist:
rfg = RandomForestClassifier(**g)
rfg = gen_pkl(pkl, _func=rfg.fit, _lib='joblib', _persist=persist, X=variables.values, y=targets.values)
else:
rfg = joblib.load(pkl)
logging.info('Pickled random forest grid already exists, skipping ' + pkl)
rfg_oob.append(list(g.values()) + [1 - rfg.oob_score_, rfg.score(variables.values, targets.values), pkl])
    return pd.DataFrame(rfg_oob, columns=grid_names + ['oob_error', 'score', 'pkl'])
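# Hedged usage sketch for gen_rfg (commented out); the grid values and file
# names are illustrative, and oob_score=True is assumed because gen_rfg reads
# the oob_score_ attribute of each fitted forest.
# from sklearn.model_selection import ParameterGrid
# grid = ParameterGrid({'n_estimators': [100, 200], 'oob_score': [True]})
# rfg_files = ['rf_100.pkl', 'rf_200.pkl']
# summary = gen_rfg(rfg_files, grid, train_variables, train_targets)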
import pandas as pd
import os
os.chdir('/home/sameen/maltrail/new')
file_chdir = os.getcwd()
filecsv_list = []
for root, dirs, files in os.walk(file_chdir):
for file in files:
if os.path.splitext(file)[0] != 'all':
#alldata=pd.read_csv(file)
filecsv_list.append(file)
data = pd.DataFrame()
data = pd.read_csv(filecsv_list[0], names=["ip", "info", "reference"])
del filecsv_list[0]
label_list = []
size = []
count = 0
for csv in filecsv_list:
    data = data.append(pd.read_csv(csv, names=["ip", "info", "reference"]))
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 13 17:02:16 2021
@author: <NAME>
"""
import pandas as pd
import altair as alt
import streamlit as st
country_code = pd.read_csv('country_code.csv')
df = pd.read_csv("suicide_population.csv")
year_counts = df.groupby('country')['year'].unique().apply(len)
ten_year_plus_ind = (year_counts > 10).to_list()
ten_year_plus_country = df['country'].unique()[ten_year_plus_ind].tolist()
df_filtered = df.loc[df['country'].isin(ten_year_plus_country)]
df_filtered = pd.merge(df_filtered,country_code,how='left', left_on='country',right_on='country')
def make_bar_plot_df_multi_country(country_names,var):
con_df = df_filtered.loc[df_filtered['country'].isin(country_names)]
counts_df_list = []
for c in country_names:
counts = con_df.loc[con_df['country'] ==c].groupby(var)['suicides_no'].sum().to_frame()
counts = counts.reset_index()
counts['country'] = c
counts_df_list.append(counts)
counts_df = pd.concat(counts_df_list)
return counts_df
continent_names = ['Europe','Americas']
def make_bar_plot_multi_continent(continent_names, var):
con_df = df_filtered.loc[df_filtered['region'].isin(continent_names)]
counts_df_list = []
for c in continent_names:
counts = con_df.loc[con_df['region'] ==c].groupby(var)['suicides_no'].sum().to_frame()
counts = counts.reset_index()
counts['continent'] = c
counts_df_list.append(counts)
counts_df = pd.concat(counts_df_list)
return counts_df
# This function takes 2 arguments: a list of country names and the mode ('raw' for absolute counts, otherwise suicides per 100k)
def make_line_plot_df_multi_country(country_names, mode):
con_df = df_filtered.loc[df_filtered['country'].isin(country_names)]
counts_df_list = []
res_var = 'suicides_no' if mode == 'raw' else 'suicides/100k'
for c in country_names:
year_counts = con_df.loc[con_df['country'] ==c].groupby('year')[res_var].sum().to_frame()
year_counts = year_counts.reset_index()
year_counts['country'] = c
counts_df_list.append(year_counts)
counts_df = pd.concat(counts_df_list)
return counts_df
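# Hedged usage sketch (commented out); the country names are illustrative and
# must exist in df_filtered, and mode is 'raw' for absolute counts or anything
# else for suicides per 100k.
# line_df = make_line_plot_df_multi_country(['Japan', 'France'], 'raw')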
def make_line_plot_df_cont_attributes(country_names, continuous_attr):
con_df = df_filtered.loc[df_filtered['country'].isin(country_names),['country','year',continuous_attr]]
counts_df_list = []
for c in country_names:
year_counts = con_df.loc[con_df['country'] ==c].groupby('year')[continuous_attr].sum().to_frame()
year_counts = year_counts.reset_index()
year_counts['country'] = c
counts_df_list.append(year_counts)
    counts_df = pd.concat(counts_df_list)
    return counts_df
import os, sys, ctypes
import win32com.client
import pandas as pd
from datetime import datetime
from slacker import Slacker
import time, calendar
slack = Slacker('@@@@@@@@@')  # must be changed
def dbgout(message):
"""인자로 받은 문자열을 파이썬 셸과 슬랙으로 동시에 출력한다."""
print(datetime.now().strftime('[%m/%d %H:%M:%S]'), message)
strbuf = datetime.now().strftime('[%m/%d %H:%M:%S] ') + message
slack.chat.post_message('#stock', strbuf)
def printlog(message, *args):
"""인자로 받은 문자열을 파이썬 셸에 출력한다."""
print(datetime.now().strftime('[%m/%d %H:%M:%S]'), message, *args)
# Creon Plus common OBJECTs
cpCodeMgr = win32com.client.Dispatch('CpUtil.CpStockCode')
cpStatus = win32com.client.Dispatch('CpUtil.CpCybos')
cpTradeUtil = win32com.client.Dispatch('CpTrade.CpTdUtil')
cpStock = win32com.client.Dispatch('DsCbo1.StockMst')
cpOhlc = win32com.client.Dispatch('CpSysDib.StockChart')
cpBalance = win32com.client.Dispatch('CpTrade.CpTd6033')
cpCash = win32com.client.Dispatch('CpTrade.CpTdNew5331A')
cpOrder = win32com.client.Dispatch('CpTrade.CpTd0311')
# Select trading account (0~)
#acc = cpTradeUtil.AccountNumber[0]  # account number
def check_creon_system():
"""크레온 플러스 시스템 연결 상태를 점검한다."""
# 관리자 권한으로 프로세스 실행 여부
if not ctypes.windll.shell32.IsUserAnAdmin():
printlog('check_creon_system() : admin user -> FAILED')
return False
    # Check the connection to the server
if (cpStatus.IsConnect == 0):
printlog('check_creon_system() : connect to server -> FAILED')
return False
    # Order-related initialization - only needed when account-related code is used
if (cpTradeUtil.TradeInit(0) != 0):
printlog('check_creon_system() : init trade -> FAILED')
return False
return True
def get_current_price(code):
"""인자로 받은 종목의 현재가, 매수호가, 매도호가를 반환한다."""
cpStock.SetInputValue(0, code) # 종목코드에 대한 가격 정보
cpStock.BlockRequest()
item = {}
    item['cur_price'] = cpStock.GetHeaderValue(11)  # current price
    item['ask'] = cpStock.GetHeaderValue(16)  # buy quote price
    item['bid'] = cpStock.GetHeaderValue(17)  # sell quote price
return item['cur_price'], item['ask'], item['bid']
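# Hedged usage sketch (commented out); 'A005930' is an illustrative
# Creon-style stock code and the call requires a live Creon Plus connection.
# cur, buy_quote, sell_quote = get_current_price('A005930')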
def get_ohlc(code, qty):
"""인자로 받은 종목의 OHLC 가격 정보를 qty 개수만큼 반환한다."""
cpOhlc.SetInputValue(0, code) # 종목코드
cpOhlc.SetInputValue(1, ord('2')) # 1:기간, 2:개수
cpOhlc.SetInputValue(4, qty) # 요청개수
cpOhlc.SetInputValue(5, [0, 2, 3, 4, 5]) # 0:날짜, 2~5:OHLC
cpOhlc.SetInputValue(6, ord('D')) # D:일단위
cpOhlc.SetInputValue(9, ord('1')) # 0:무수정주가, 1:수정주가
cpOhlc.BlockRequest()
    count = cpOhlc.GetHeaderValue(3)  # 3: number of records received
columns = ['open', 'high', 'low', 'close']
index = []
rows = []
for i in range(count):
index.append(cpOhlc.GetDataValue(0, i))
rows.append([cpOhlc.GetDataValue(1, i), cpOhlc.GetDataValue(2, i),
cpOhlc.GetDataValue(3, i), cpOhlc.GetDataValue(4, i)])
    df = pd.DataFrame(rows, columns=columns, index=index)
    return df
"""
Preprocess sites data.
<NAME>
February 2022
"""
import sys
import os
import configparser
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.ops import transform
from shapely.geometry import shape, Point, mapping, LineString, MultiPolygon
from tqdm import tqdm
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def run_site_processing(iso3, level):
"""
    Meta function for running site processing at the given GID level.
"""
create_national_sites_csv(iso3)
create_national_sites_shp(iso3)
process_country_shapes(iso3)
process_regions(iso3, level)
create_regional_sites_layer_gid_1(iso3, level)
tech_specific_sites_gid_1(iso3, level)
if str(level) == "2":
create_regional_sites_layer_gid_2(iso3, level)
tech_specific_sites_gid_2(iso3, level)
return
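# Hedged usage sketch (commented out); 'KEN' is an illustrative ISO3 code and
# the level argument selects the GID level used for regional processing.
# run_site_processing('KEN', 2)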
def create_national_sites_csv(iso3):
"""
Create a national sites csv layer for a selected country.
"""
filename = '{}.csv'.format(iso3)
folder = os.path.join(DATA_PROCESSED, iso3, 'sites')
path_csv = os.path.join(folder, filename)
### Produce national sites data layers
if not os.path.exists(path_csv):
print('site.csv data does not exist')
print('Subsetting site data for {}'.format(iso3))
if not os.path.exists(folder):
os.makedirs(folder)
filename = "mobile_codes.csv"
path = os.path.join(DATA_RAW, filename)
        mobile_codes = pd.read_csv(path)
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2016, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now,
format_timestamp)
import pandas as pd
import statsmodels.formula.api as sm
utils.setup_logging()
_log = logging.getLogger(__name__)
class PGnEAgent(Agent):
def __init__(self, config_path, **kwargs):
super(PGnEAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
self.site = self.config.get('campus')
self.building = self.config.get('building')
self.temp_unit = self.config.get('temp_unit')
self.power_unit = self.config.get('power_unit')
self.out_temp_name = self.config.get('out_temp_name')
self.power_name = self.config.get('power_name')
self.aggregate_in_min = self.config.get('aggregate_in_min')
self.aggregate_freq = str(self.aggregate_in_min) + 'Min'
self.ts_name = self.config.get('ts_name')
self.window_size_in_day = int(self.config.get('window_size_in_day'))
self.min_required_window_size_in_percent = float(self.config.get('min_required_window_size_in_percent'))
self.interval_in_min = int(self.config.get('interval_in_min'))
self.no_of_recs_needed = 10 # self.window_size_in_day * 24 * (60 / self.interval_in_min)
self.min_no_of_records_needed_after_aggr = int(self.min_required_window_size_in_percent/100 *
self.no_of_recs_needed/self.aggregate_in_min)
self.schedule_run_in_sec = int(self.config.get('schedule_run_in_hr')) * 3600
# Testing
#self.no_of_recs_needed = 200
#self.min_no_of_records_needed_after_aggr = self.no_of_recs_needed/self.aggregate_in_min
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
self.core.periodic(self.schedule_run_in_sec, self.calculate_latest_coeffs)
def calculate_latest_coeffs(self):
unit_topic_tmpl = "{campus}/{building}/{unit}/{point}"
unit_points = [self.power_name]
df = None
#Get data
unit = self.temp_unit
for point in unit_points:
if point == self.power_name:
unit = self.power_unit
unit_topic = unit_topic_tmpl.format(campus=self.site,
building=self.building,
unit=unit,
point=point)
result = self.vip.rpc.call('platform.historian',
'query',
topic=unit_topic,
count=self.no_of_recs_needed,
order="LAST_TO_FIRST").get(timeout=10000)
df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])
df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])
df2 = df2.groupby([pd.TimeGrouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()
# df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))
df = df2 if df is None else pd.merge(df, df2, how='outer', left_index=True, right_index=True)
#Calculate coefficients
result_df = self.calculate_coeffs(df)
# Publish coeffs to store
#if coeffs is not None:
# self.save_coeffs(coeffs, subdevice)
def convert_units_to_SI(self, df, point, unit):
if unit == 'degreesFahrenheit':
df[point] = (df[point]-32) * 5/9
# Air state assumption: http://www.remak.eu/en/mass-air-flow-rate-unit-converter
# 1cfm ~ 0.00055kg/s
if unit == 'cubicFeetPerMinute':
df[point] = df[point] * 0.00055
def calculate_coeffs(self, dP):
dP['time'] = dP['posttime']
dP = dP.set_index(['posttime'])
        dP.index = pd.to_datetime(dP.index)
#import urllib2
import csv
import sys
import re
from datetime import datetime
import time
import pandas as pd
import configparser
import hashlib
import os
import rdflib
import logging
logging.getLogger().disabled = True
if sys.version_info[0] == 3:
from importlib import reload
reload(sys)
if sys.version_info[0] == 2:
sys.setdefaultencoding('utf8')
whyis = rdflib.Namespace('http://vocab.rpi.edu/whyis/')
np = rdflib.Namespace("http://www.nanopub.org/nschema#")
prov = rdflib.Namespace("http://www.w3.org/ns/prov#")
dc = rdflib.Namespace("http://purl.org/dc/terms/")
sio = rdflib.Namespace("http://semanticscience.org/resource/")
setl = rdflib.Namespace("http://purl.org/twc/vocab/setl/")
pv = rdflib.Namespace("http://purl.org/net/provenance/ns#")
skos = rdflib.Namespace("http://www.w3.org/2008/05/skos#")
rdfs = rdflib.RDFS
rdf = rdflib.RDF
owl = rdflib.OWL
xsd = rdflib.XSD
def parseString(input_string, delim) :
my_list = input_string.split(delim)
for i in range(0,len(my_list)) :
my_list[i] = my_list[i].strip()
return my_list
def codeMapper(input_word) :
unitVal = input_word
for unit_label in unit_label_list :
if (unit_label == input_word) :
unit_index = unit_label_list.index(unit_label)
unitVal = unit_uri_list[unit_index]
for unit_code in unit_code_list :
if (unit_code == input_word) :
unit_index = unit_code_list.index(unit_code)
unitVal = unit_uri_list[unit_index]
return unitVal
def convertImplicitToKGEntry(*args) :
if (args[0][:2] == "??") :
if (studyRef is not None ) :
if (args[0]==studyRef) :
return "<" + prefixes[kb] + args[0][2:] + ">"
if (len(args) == 2) :
return "<" + prefixes[kb] + args[0][2:] + "-" + args[1] + ">"
else :
return "<" + prefixes[kb] + args[0][2:] + ">"
elif (':' not in args[0]) :
# Check for entry in column list
for item in explicit_entry_list :
if args[0] == item.Column :
if (len(args) == 2) :
return "<" + prefixes[kb] + args[0].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + "-" + args[1] + ">"
else :
return "<" + prefixes[kb] + args[0].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + ">"
return '"' + args[0] + "\"^^xsd:string"
else :
return args[0]
def checkImplicit(input_word) :
try:
if (input_word[:2] == "??") :
return True
else :
return False
except Exception as e:
print("Something went wrong in checkImplicit()" + str(e))
sys.exit(1)
def isfloat(term):
try:
float(term)
return True
except ValueError:
return False
def isURI(term):
try:
if any(c in term for c in ("http://","https://")) :
return True
else:
return False
except ValueError:
return False
def isSchemaVar(term) :
for entry in explicit_entry_list :
if term == entry[1] :
return True
return False
def assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,column, npubIdentifier) :
v_id = npubIdentifier
for v_tuple in implicit_entry_tuples : # referenced in implicit list
if v_tuple["Column"] == a_tuple[column]:
v_id = hashlib.md5((str(v_tuple) + str(npubIdentifier)).encode("utf-8")).hexdigest()
if v_id == None : # maybe it's referenced in the timeline
for t_tuple in timeline_tuple:
if t_tuple["Column"] == a_tuple[column]:
#print("Got here")
v_id = hashlib.md5((str(t_tuple) + str(npubIdentifier)).encode("utf-8")).hexdigest()
if v_id == npubIdentifier : # if it's not in implicit list or timeline
print("Warning, " + column + " ID assigned to nanopub ID")
return v_id
def assignTerm(col_headers, column, implicit_entry_tuples, a_tuple, row, v_id) :
termURI = None
for v_tuple in implicit_entry_tuples : # referenced in implicit list
if v_tuple["Column"] == a_tuple[column]:
if "Template" in v_tuple :
template_term = extractTemplate(col_headers,row,v_tuple["Template"])
termURI = "<" + prefixes[kb] + template_term + ">"
if termURI is None :
termURI = convertImplicitToKGEntry(a_tuple[column],v_id)
return termURI
'''def processPrefixes(output_file,query_file):
if 'prefixes' in config['Prefixes']:
prefix_fn = config['Prefixes']['prefixes']
else:
prefix_fn="prefixes.txt"
prefix_file = open(prefix_fn,"r")
prefixes = prefix_file.readlines()
for prefix in prefixes :
#print(prefix.find(">"))
output_file.write(prefix)
query_file.write(prefix[1:prefix.find(">")+1])
query_file.write("\n")
prefix_file.close()
output_file.write("\n")'''
def checkTemplate(term) :
if "{" in term and "}" in term:
return True
return False
def extractTemplate(col_headers,row,term) :
while checkTemplate(term) :
open_index = term.find("{")
close_index = term.find("}")
key = term[open_index+1:close_index]
term = term[:open_index] + str(row[col_headers.index(key)+1]) + term[close_index+1:]
return term
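# Hedged illustration of extractTemplate (commented out): with
# col_headers == ['Column', 'Year'] and row == ('id0', 'val0', '1995'),
# the template 'entry_{Year}' appears to resolve to 'entry_1995', since row is
# assumed to carry one leading index field before the header columns.
# extractTemplate(['Column', 'Year'], ('id0', 'val0', '1995'), 'entry_{Year}')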
def extractExplicitTerm(col_headers,row,term) : # need to write this function
while checkTemplate(term) :
open_index = term.find("{")
close_index = term.find("}")
key = term[open_index+1:close_index]
if isSchemaVar(key) :
for entry in explicit_entry_list :
if entry.Column == key :
if pd.notnull(entry.Template) :
term = extractTemplate(col_headers,row,entry.Template)
else :
typeString = ""
if pd.notnull(entry.Attribute) :
typeString += str(entry.Attribute)
if pd.notnull(entry.Entity) :
typeString += str(entry.Entity)
if pd.notnull(entry.Label) :
typeString += str(entry.Label)
if pd.notnull(entry.Unit) :
typeString += str(entry.Unit)
if pd.notnull(entry.Time) :
typeString += str(entry.Time)
if pd.notnull(entry.inRelationTo) :
typeString += str(entry.inRelationTo)
if pd.notnull(entry.wasGeneratedBy) :
typeString += str(entry.wasGeneratedBy)
if pd.notnull(entry.wasDerivedFrom) :
typeString += str(entry.wasDerivedFrom)
identifierKey = hashlib.md5((str(row[col_headers.index(key)+1])+typeString).encode("utf-8")).hexdigest()
term = entry.Column + "-" + identifierKey
#return extractTemplate(col_headers,row,entry.Template)
else : # What does it mean for a template reference to not be a schema variable?
            print("Warning: Template reference " + term + " is not a schema variable")
term = term[:open_index] + str(row[col_headers.index(key)+1]) + term[close_index+1:] # Needs updating probably, at least checking
return term
def writeClassAttributeOrEntity(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)) :
if ',' in item.Entity :
entities = parseString(item.Entity,',')
for entity in entities :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(entity)
whereString += codeMapper(entity) + " "
swrlString += codeMapper(entity) + "(" + term + ") ^ "
if entities.index(entity) + 1 != len(entities) :
whereString += ", "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(item.Entity)
whereString += codeMapper(item.Entity) + " "
swrlString += codeMapper(item.Entity) + "(" + term + ") ^ "
input_tuple["Entity"]=codeMapper(item.Entity)
if (input_tuple["Entity"] == "hasco:Study") :
global studyRef
studyRef = item.Column
input_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)) :
if ',' in item.Attribute :
attributes = parseString(item.Attribute,',')
for attribute in attributes :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(attribute)
whereString += codeMapper(attribute) + " "
swrlString += codeMapper(attribute) + "(" + term + ") ^ "
if attributes.index(attribute) + 1 != len(attributes) :
whereString += ", "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(item.Attribute)
whereString += codeMapper(item.Attribute) + " "
swrlString += codeMapper(item.Attribute) + "(" + term + ") ^ "
input_tuple["Attribute"]=codeMapper(item.Attribute)
else :
print("Warning: Entry not assigned an Entity or Attribute value, or was assigned both.")
input_tuple["Attribute"]=codeMapper("sio:Attribute")
assertionString += " ;\n <" + rdfs.subClassOf + "> sio:Attribute"
whereString += "sio:Attribute "
swrlString += "sio:Attribute(" + term + ") ^ "
return [input_tuple, assertionString, whereString, swrlString]
def writeClassAttributeOf(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.attributeOf)) :
if checkTemplate(item.attributeOf) :
open_index = item.attributeOf.find("{")
close_index = item.attributeOf.find("}")
key = item.attributeOf[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["attributeOf"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["attributeOf"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["attributeOf"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(item.attributeOf) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["attributeOf"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + convertImplicitToKGEntry(item.attributeOf)
whereString += " ;\n <" + properties_tuple["attributeOf"] + "> " + [item.attributeOf + " ",item.attributeOf[1:] + "_V "][checkImplicit(item.attributeOf)]
swrlString += properties_tuple["attributeOf"] + "(" + term + " , " + [item.attributeOf,item.attributeOf[1:] + "_V"][checkImplicit(item.attributeOf)] + ") ^ "
input_tuple["isAttributeOf"]=item.attributeOf
return [input_tuple, assertionString, whereString, swrlString]
def writeClassUnit(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Unit)) :
if checkTemplate(item.Unit) :
open_index = item.Unit.find("{")
close_index = item.Unit.find("}")
key = item.Unit[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["Unit"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["Unit"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
input_tuple["Unit"] = key
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(codeMapper(item.Unit)) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + str(codeMapper(item.Unit))
whereString += " ;\n <" + properties_tuple["Unit"] + "> " + str(codeMapper(item.Unit))
swrlString += properties_tuple["Unit"] + "(" + term + " , " + str(codeMapper(item.Unit)) + ") ^ "
input_tuple["Unit"] = codeMapper(item.Unit)
# Incorporate item.Format here
return [input_tuple, assertionString, whereString, swrlString]
def writeClassTime(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Time)) :
if checkTemplate(item.Time) :
open_index = item.Time.find("{")
close_index = item.Time.find("}")
key = item.Time[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Time"] + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(key) + " ]"
#assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["Time"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["Time"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Time"] + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(item.Time) + " ]"
#assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(item.Time)
whereString += " ;\n <" + properties_tuple["Time"] + "> " + [item.Time + " ",item.Time[1:] + "_V "][checkImplicit(item.Time)]
swrlString += properties_tuple["Time"] + "(" + term + " , " + [item.Time + " ",item.Time[1:] + "_V "][checkImplicit(item.Time)] + ") ^ "
input_tuple["Time"]=item.Time
return [input_tuple, assertionString, whereString, swrlString]
def writeClassRelation(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.inRelationTo)) :
input_tuple["inRelationTo"]=item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)) :
assertionString += " ;\n " + item.Relation + " " + convertImplicitToKGEntry(item.inRelationTo)
if(isSchemaVar(item.inRelationTo)):
whereString += " ;\n " + item.Relation + " ?" + item.inRelationTo.lower() + "_E "
swrlString += item.Relation + "(" + term + " , " + "?" + item.inRelationTo.lower() + "_E) ^ "
else :
whereString += " ;\n " + item.Relation + " " + [item.inRelationTo + " ",item.inRelationTo[1:] + "_V "][checkImplicit(item.inRelationTo)]
swrlString += item.Relation + "(" + term + " , " + [item.inRelationTo,item.inRelationTo[1:] + "_V"][checkImplicit(item.inRelationTo)] + ") ^ "
input_tuple["Relation"]=item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)) :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + [item.inRelationTo,convertImplicitToKGEntry(item.inRelationTo)][checkImplicit(item.inRelationTo)] + " ;\n <" + owl.onProperty + "> <" + properties_tuple["inRelationTo"] + "> ] <" + item.Role + "> ) ] ]"
#assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(item.inRelationTo) + " ]"
whereString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ;\n <" + properties_tuple["inRelationTo"] + "> " + [item.inRelationTo + " ",item.inRelationTo[1:] + "_V "][checkImplicit(item.inRelationTo)] + " ]"
swrlString += "" # add appropriate swrl term
input_tuple["Role"]=item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)) :
input_tuple["Relation"]=item.Relation
input_tuple["Role"]=item.Role
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + [item.inRelationTo,convertImplicitToKGEntry(item.inRelationTo)][checkImplicit(item.inRelationTo)] + " ;\n <" + owl.onProperty + "> <" + item.Relation + "> ] <" + item.Role + "> ) ] ]"
#assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(item.inRelationTo)
if(isSchemaVar(item.inRelationTo)):
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> ?" + item.inRelationTo.lower() + "_E "
swrlString += "" # add appropriate swrl term
else :
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + [item.inRelationTo + " ",item.inRelationTo[1:] + "_V "][checkImplicit(item.inRelationTo)]
swrlString += "" # add appropriate swrl term
elif (pd.isnull(item.Relation)) and (pd.isnull(item.Role)) :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(item.inRelationTo) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["inRelationTo"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(item.inRelationTo)
if(isSchemaVar(item.inRelationTo)):
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> ?" + item.inRelationTo.lower() + "_E "
swrlString += properties_tuple["inRelationTo"] + "(" + term + " , " + "?" + item.inRelationTo.lower() + "_E) ^ "
else :
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + [item.inRelationTo + " ",item.inRelationTo[1:] + "_V "][checkImplicit(item.inRelationTo)]
swrlString += properties_tuple["inRelationTo"] + "(" + term + " , " + [item.inRelationTo,item.inRelationTo[1:] + "_V"][checkImplicit(item.inRelationTo)] + ") ^ "
elif (pd.notnull(item.Role)) : # if there is a role, but no in relation to
input_tuple["Role"]=item.Role
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + item.Role + "> ] ]"
#assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ]"
whereString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ]"
swrlString += "" # add appropriate swrl term
return [input_tuple, assertionString, whereString, swrlString]
def writeClassWasDerivedFrom(item, term, input_tuple, provenanceString, whereString, swrlString) :
if pd.notnull(item.wasDerivedFrom) :
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(item.wasDerivedFrom) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(item.wasDerivedFrom)
input_tuple["wasDerivedFrom"]=item.wasDerivedFrom
if(isSchemaVar(item.wasDerivedFrom)):
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + item.wasDerivedFrom.lower() + "_E "
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + "?" + item.wasDerivedFrom.lower() + "_E) ^ "
elif checkTemplate(item.wasDerivedFrom) :
open_index = item.wasDerivedFrom.find("{")
close_index = item.wasDerivedFrom.find("}")
key = item.wasDerivedFrom[open_index+1:close_index]
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + [item.wasDerivedFrom + " ",item.wasDerivedFrom[1:] + "_V "][checkImplicit(item.wasDerivedFrom)]
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [item.wasDerivedFrom,item.wasDerivedFrom[1:] + "_V"][checkImplicit(item.wasDerivedFrom)] + ") ^ "
return [input_tuple, provenanceString, whereString, swrlString]
def writeClassWasGeneratedBy(item, term, input_tuple, provenanceString, whereString, swrlString) :
if pd.notnull(item.wasGeneratedBy) :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(item.wasGeneratedBy)
input_tuple["wasGeneratedBy"]=item.wasGeneratedBy
if(isSchemaVar(item.wasGeneratedBy)):
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + item.wasGeneratedBy.lower() + "_E "
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + "?" + item.wasGeneratedBy.lower() + "_E) ^ "
elif checkTemplate(item.wasGeneratedBy) :
open_index = item.wasGeneratedBy.find("{")
close_index = item.wasGeneratedBy.find("}")
key = item.wasGeneratedBy[open_index+1:close_index]
            provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + [item.wasGeneratedBy + " ",item.wasGeneratedBy[1:] + "_V "][checkImplicit(item.wasGeneratedBy)]
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [item.wasGeneratedBy,item.wasGeneratedBy[1:] + "_V"][checkImplicit(item.wasGeneratedBy)] + ") ^ "
return [input_tuple, provenanceString, whereString, swrlString]
def writeImplicitEntryTuples(implicit_entry_list, timeline_tuple, output_file, query_file, swrl_file, dm_fn) :
implicit_entry_tuples = []
assertionString = ''
provenanceString = ''
whereString = '\n'
swrlString = ''
datasetIdentifier = hashlib.md5(dm_fn.encode('utf-8')).hexdigest()
output_file.write("<" + prefixes[kb] + "head-implicit_entry-" + datasetIdentifier + "> { ")
output_file.write("\n <" + prefixes[kb] + "nanoPub-implicit_entry-" + datasetIdentifier + "> <" + rdf.type + "> <" + np.Nanopublication + ">")
output_file.write(" ;\n <" + np.hasAssertion + "> <" + prefixes[kb] + "assertion-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasProvenance + "> <" + prefixes[kb] + "provenance-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasPublicationInfo + "> <" + prefixes[kb] + "pubInfo-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" .\n}\n\n")
col_headers=list(pd.read_csv(dm_fn).columns.values)
for item in implicit_entry_list :
implicit_tuple = {}
if "Template" in col_headers and pd.notnull(item.Template) :
implicit_tuple["Template"]=item.Template
assertionString += "\n <" + prefixes[kb] + item.Column[2:] + "> <" + rdf.type + "> owl:Class"
term_implicit = item.Column[1:] + "_V"
whereString += " " + term_implicit + " <" + rdf.type + "> "
implicit_tuple["Column"]=item.Column
        if (hasattr(item,"Label") and pd.notnull(item.Label)) :
import strax
import straxen
import tarfile
import io
import os
from warnings import warn
from os import environ as os_environ
from straxen import aux_repo, pax_file
from pandas import DataFrame
from immutabledict import immutabledict
import numpy as np
export, __all__ = strax.exporter()
nt_test_run_id = '012882'
test_run_id_1T = '180423_1021'
testing_config_1T = dict(
hev_gain_model=('1T_to_pe_placeholder', False),
gain_model=('1T_to_pe_placeholder', False),
elife_conf=('elife_constant', 1e6),
electron_drift_velocity=("electron_drift_velocity_constant", 1e-4),
electron_drift_time_gate=("electron_drift_time_gate_constant", 1700),
)
# Let's make a dummy map for NVeto
_nveto_pmt_dummy = {'channel': list(range(2000, 2120)),
'x': list(range(120)),
'y': list(range(120)),
'z': list(range(120)),
}
_nveto_pmt_dummy_df = DataFrame(_nveto_pmt_dummy)
"""integration test for loanpy.sanity.py (2.0 BETA) for pytest 7.1.1"""
from ast import literal_eval
from datetime import datetime
from os import remove
from pathlib import Path
from pandas import read_csv, DataFrame, RangeIndex
from pandas.testing import assert_frame_equal, assert_series_equal
from pytest import raises
from unittest.mock import call, patch
from loanpy.adrc import Adrc
from loanpy.sanity import (
ArgumentsAlreadyTested,
cache,
check_cache,
eval_adapt,
eval_recon,
eval_all,
eval_one,
get_crossval_data,
get_dist,
get_nse4df,
get_noncrossval_sc,
get_tpr_fpr_opt,
loop_thru_data,
make_stat,
plot_roc,
postprocess,
postprocess2,
phonotactics_predicted,
write_to_cache)
PATH2FORMS = Path(__file__).parent / "input_files" / "forms_3cogs_wot.csv"
PATH2SC_AD = Path(__file__).parent / "input_files" / "sc_ad_3cogs.txt"
PATH2SC_RC = Path(__file__).parent / "input_files" / "sc_rc_3cogs.txt"
MOCK_CACHE_PATH = Path(__file__).parent / "mock_cache.csv"
def test_check_cache():
"""test if DIY cache is initiated correctly and args checked in it"""
# make sure this file does not exist (e.g. from previous tests)
try:
remove(MOCK_CACHE_PATH)
except FileNotFoundError:
pass
# set up first expected outcome, a pandas data frame
exp1 = DataFrame(columns=["arg1", "arg2", "arg3", "opt_tpr",
"optimal_howmany", "opt_tp", "timing", "date"])
# assert first break works: cache not found
check_cache(MOCK_CACHE_PATH, {"arg1": "x", "arg2": "y", "arg3": "z"})
assert_frame_equal(read_csv(MOCK_CACHE_PATH), exp1)
# check if nothing happens if arguments were NOT tested already
# assert that the function runs, does nothing, and returns None
assert check_cache(MOCK_CACHE_PATH,
{"arg1": "a", "arg2": "b", "arg3": "c"}) is None
# tear down
remove(MOCK_CACHE_PATH)
# check if exception is rased if these params were tested already
# set up mock cache with stored args
DataFrame({"arg1": ["x"], "arg2": ["y"], "arg3": ["z"]}).to_csv(
MOCK_CACHE_PATH, encoding="utf-8", index=False)
# assert exception is raised bc args exist in cache already
with raises(ArgumentsAlreadyTested) as aat_mock:
check_cache(MOCK_CACHE_PATH,
{"arg1": "x", "arg2": "y", "arg3": "z"})
assert str(aat_mock.value) == f"These arguments were tested \
already, see {MOCK_CACHE_PATH} line 1! (start counting at 1 in 1st row)"
# tear down
remove(MOCK_CACHE_PATH)
def test_write_to_cache():
"""Test if the writing-part of cache functions."""
init_args_mock = {"forms_csv": "forms.csv", "tgt_lg": "EAH",
"src_lg": "WOT", "crossval": True,
"path2cache": MOCK_CACHE_PATH,
"guesslist": [[2, 4, 6, 8]],
"max_phonotactics": 1, "max_paths": 1, "writesc": False,
"writesc_phonotactics": False, "vowelharmony": False,
"only_documented_clusters": False, "sort_by_nse": False,
"phonotactics_filter": False, "show_workflow": False,
"write": False,
"outname": "viz", "plot_to": None, "plotldnld": False}
DataFrame(
columns=list(init_args_mock) + [
"optimal_howmany",
"opt_tp",
"opt_tpr",
"timing",
"date"]).to_csv(
MOCK_CACHE_PATH,
index=False,
encoding="utf-8") # empty cache
df_exp = DataFrame(
{"forms_csv": "forms.csv", "tgt_lg": "EAH",
"src_lg": "WOT", "crossval": True,
"path2cache": str(MOCK_CACHE_PATH), "guesslist": str([[2, 4, 6, 8]]),
"max_phonotactics": 1, "max_paths": 1, "writesc": False,
"writesc_phonotactics": False, "vowelharmony": False,
"only_documented_clusters": False, "sort_by_nse": False,
"phonotactics_filter": False, "show_workflow": False, "write": False,
"outname": "viz", "plot_to": "None", "plotldnld": False,
"optimal_howmany": 0.501, "opt_tp": 0.6,
"opt_tpr": 0.099, "timing": "00:00:01",
"date": datetime.now().strftime("%x %X")},
index=RangeIndex(start=0, stop=1, step=1))
# write to mock cache
write_to_cache(
stat=(0.501, 0.6, 0.099),
init_args=init_args_mock,
path2cache=MOCK_CACHE_PATH, start=1, end=2)
# assert cache was written correctly
assert_frame_equal(read_csv(MOCK_CACHE_PATH), df_exp)
# assert sort functions correctly
df_exp = DataFrame(
{"forms_csv": ["forms.csv"] * 2, "tgt_lg": ["EAH"] * 2,
"src_lg": ["WOT"] * 2, "crossval": [True] * 2,
"path2cache": [str(MOCK_CACHE_PATH)] * 2,
"guesslist": [str([[2, 4, 6, 8]])] * 2,
"max_phonotactics": [1] * 2, "max_paths": [1] * 2,
"writesc": [False] * 2,
"writesc_phonotactics": [False] * 2, "vowelharmony": [False] * 2,
"only_documented_clusters": [False] * 2, "sort_by_nse": [False] * 2,
"phonotactics_filter": [False] * 2,
"show_workflow": [False] * 2, "write": [False] * 2,
"outname": ["viz"] * 2,
"plot_to": ["None"] * 2, "plotldnld": [False] * 2,
"optimal_howmany": [0.501] * 2, "opt_tp": [0.6, 0.6],
"opt_tpr": [0.8, 0.099],
"timing": ["00:00:01"] * 2,
"date": [datetime.now().strftime("%x %X")] * 2})
# write to mock cache
write_to_cache(
stat=(0.501, 0.6, 0.8),
init_args=init_args_mock,
path2cache=MOCK_CACHE_PATH, start=1, end=2)
# assert cache was written and sorted correctly
assert_frame_equal(read_csv(MOCK_CACHE_PATH), df_exp)
remove(MOCK_CACHE_PATH)
del df_exp, init_args_mock
def test_cache():
"""Is cache read and written to correctly?"""
# set up mock path
mockpath2cache = Path(__file__).parent / "mock_cache.csv"
try:
remove(mockpath2cache) # in case of leftovers from previous tests
except FileNotFoundError:
pass
# test cache with mockfunc
@cache
def mockfunc(*args, **kwargs): return "dfety", (1, 2, 3), 4, 5
assert mockfunc(path2cache=mockpath2cache, a="hi", b="bye") is None
# check if cache was initiated and written correctly
assert_frame_equal(read_csv(mockpath2cache), DataFrame({
"path2cache": [str(mockpath2cache)], "a": ["hi"], "b": ["bye"],
"opt_tpr": [3], "optimal_howmany": [1], "opt_tp": [2],
"timing": ["00:00:01"], "date": [datetime.now().strftime("%x %X")]
}))
remove(mockpath2cache)
# todo: once integration test for eval_all works, test @cache eval_all
def test_eval_adapt():
"""Is the main function doing its job in evaluating etymological data?"""
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
assert eval_adapt(
"Apfel",
adrc_obj,
"apple",
10,
False,
False,
False,
False,
1,
1,
100,
49,
False) == {
'best_guess': 'KeyError',
'guesses': float("inf")}
# assert with show_workflow=True
# KeyError is triggered before 3rd part of workflow is added.
# max_phonotactics=0, therefore adapted_phonotactics=tokenised
assert eval_adapt("Apfel", adrc_obj, "apple",
10, False, False, False, False, 0, 1, 100, 49, True) == {
"best_guess": "KeyError", "guesses": float("inf"),
'tokenised': "['a', 'p', 'p', 'l', 'e']",
'adapted_phonotactics': "[['a', 'p', 'p', 'l', 'e']]"}
# assert no keyerror but target missed
assert eval_adapt(
"daʃa",
adrc_obj,
"dat͡ʃːa",
10,
False,
False,
False,
False,
0,
1,
100,
49,
False) == {
'best_guess': 'dat͡ʃa',
'guesses': float("inf")}
# assert no keyerror but target missed while showing workflow
assert eval_adapt("daʃa", adrc_obj, "dat͡ʃːa",
10, False, False, False, False, 0, 1, 100, 49, True) == {
'best_guess': 'dat͡ʃa', 'guesses': float("inf"),
'adapted_phonotactics': "[['d', 'a', 't͡ʃː', 'a']]",
'before_combinatorics': "[[['d'], ['a'], ['t͡ʃ'], ['a']]]",
'tokenised': "['d', 'a', 't͡ʃː', 'a']"}
# no keyerror, target missed, show workflow, max_phonotactics=1
assert eval_adapt("daʃa", adrc_obj, "aldajd",
10, False, False, False, False, 1, 1, 100, 49, True) == {
'adapted_phonotactics': "[['a', 'l', 'd', 'a', 'd']]",
'before_combinatorics': "[[['a'], ['l'], ['d'], ['a'], ['d']]]",
'best_guess': 'aldad',
'donor_phonotactics': 'VCCVCC',
'guesses': float("inf"),
'predicted_phonotactics': "['VCCVC']",
'tokenised': "['a', 'l', 'd', 'a', 'j', 'd']"}
# assert target hit
assert eval_adapt(
"dat͡ʃa",
adrc_obj,
"dat͡ʃːa",
10,
False,
False,
False,
False,
0,
1,
100,
49,
False) == {
'best_guess': 'dat͡ʃa',
'guesses': 1}
# assert target hit while showing workflow, no repair_phonotactics
assert eval_adapt("dat͡ʃa", adrc_obj, "dat͡ʃːa",
10, False, False, False, False, 0, 1, 100, 49, True) == {
'best_guess': 'dat͡ʃa', 'guesses': 1,
'adapted_phonotactics': "[['d', 'a', 't͡ʃː', 'a']]",
'before_combinatorics': "[[['d'], ['a'], ['t͡ʃ'], ['a']]]",
'tokenised': "['d', 'a', 't͡ʃː', 'a']"}
# assert target hit, show workflow, max_phonotactics=1
assert eval_adapt("aldad", adrc_obj, "aldajd",
10, False, False, False, False, 1, 1, 100, 49, True) == {
'adapted_phonotactics': "[['a', 'l', 'd', 'a', 'd']]",
'before_combinatorics': "[[['a'], ['l'], ['d'], ['a'], ['d']]]",
'best_guess': 'aldad',
'donor_phonotactics': 'VCCVCC',
'guesses': 1,
'predicted_phonotactics': "['VCCVC']",
'tokenised': "['a', 'l', 'd', 'a', 'j', 'd']"}
def test_eval_recon():
"""Is result of loanpy.adrc.Adrc.reconstruct evaluated?"""
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="H",
target_language="EAH", scdictlist=PATH2SC_RC)
# test else clause (neither short nor long regex)
assert eval_recon("daʃa", adrc_obj, "dada") == {
'best_guess': '#d, a, d, a# not old', 'guesses': float("inf")}
# (no show workflow for reconstruct)
# test short regex and target missed
assert eval_recon("daʃa", adrc_obj, "aːruː") == {
'best_guess': '^(a)(n)(a)(at͡ʃi)$', 'guesses': float("inf")}
# test long regex (sort_by_nse=True, last arg) and target missed
assert eval_recon("daʃa", adrc_obj,
"aːruː", 1, True, False, False, True) == {
'best_guess': 'anaat͡ʃi', 'guesses': float("inf")}
# test long regex, target missed, sort_by_nse=True, howmany=2
assert eval_recon("daʃa", adrc_obj,
"aːruː", 2, True, False, False, True) == {
'best_guess': 'anaɣ', 'guesses': float("inf")}
# test long regex, target hit, sort_by_nse=True, howmany=2
assert eval_recon("anaɣ", adrc_obj,
"aːruː", 2, True, False, False, True) == {
'best_guess': 'anaɣ', 'guesses': 1}
# test long regex, target hit, sort_by_nse=True, howmany=1
assert eval_recon("anaat͡ʃi", adrc_obj,
"aːruː", 1, True, False, False, True) == {
'best_guess': 'anaat͡ʃi', 'guesses': 1}
# test short regex, target hit, sort_by_nse=False, howmany=2
assert eval_recon("anaat͡ʃi", adrc_obj,
"aːruː", 2, True, False, False, False) == {
'best_guess': '^(a)(n)(a)(at͡ʃi|ɣ)$', 'guesses': 2}
# test short regex, target hit, sort_by_nse=False, howmany=2, diff target
assert eval_recon("anaɣ", adrc_obj,
"aːruː", 2, True, False, False, False) == {
'best_guess': '^(a)(n)(a)(at͡ʃi|ɣ)$', 'guesses': 2}
def test_eval_one():
"""Are eval_adapt, eval_recon called and their results evaluated?"""
# assert None is returned if target word is not in the predictions
# create instance of Adrc class for input
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
# assert keyerror, mode=adapt
assert eval_one(
"gaga", adrc_obj, "dada",
False, False, False, False, 0, 1, 100, 49, False, [
2, 4, 6], "adapt") == {
"guesses": float("inf"), "best_guess": "dada"}
# assert no keyerror, mode=adapt, target missed
assert eval_one(
"gaga", adrc_obj, "dada",
False, False, False, False, False, 0, 1, 100, 49, [
2, 4, 6], "adapt") == {
"guesses": float("inf"), "best_guess": "dada"}
# assert target hit on first try, mode=adapt
assert eval_one(
"dada",
adrc_obj,
"dada",
False,
False,
False,
False,
0,
1,
100,
49,
False,
[1],
"adapt") == {
"guesses": 1,
"best_guess": "dada"}
# assert target hit on first try, mode=adapt, show_workflow=True
assert eval_one(
"dada",
adrc_obj,
"dada",
False,
False,
False,
False,
0,
1,
100,
49,
True,
[1],
"adapt") == {
"guesses": 1,
"best_guess": "dada",
'adapted_phonotactics': "[['d', 'a', 'd', 'a']]",
'before_combinatorics': "[[['d'], ['a'], ['d'], ['a']]]",
'tokenised': "['d', 'a', 'd', 'a']"}
# assert reconstruct
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_RC)
# assert keyerror, mode=reconstruct
assert eval_one(
"gaga", adrc_obj, "dada",
False, False, False, False, 0, 1, 100, 49, False, [
2, 4, 6], "reconstruct") == {
"guesses": float("inf"), "best_guess": '#d, a, d, a# not old'}
# assert no keyerror, mode=reconstruct, target missed
assert eval_one(
"gaga", adrc_obj, "aːruː",
False, False, False, False, False, 0, 1, 100, 49, [
2, 4, 6], "reconstruct") == {
"guesses": float("inf"), "best_guess": "^(a)(n)(a)(at͡ʃi|ɣ)$"}
# assert target hit on first try, mode=reconstruct
assert eval_one(
"anaat͡ʃi",
adrc_obj,
"aːruː",
False,
False,
False,
False,
0,
1,
100,
49,
False,
[1],
"reconstruct") == {
"guesses": 1,
"best_guess": "^(a)(n)(a)(at͡ʃi)$"}
def test_get_noncrossval_sc():
"""Are non-crossvalidated sound correspondences extracted and assigned?"""
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# run function
adrc_obj_out = get_noncrossval_sc(adrc_obj, None)
# assert result
assert adrc_obj_out.scdict == {
'a': ['a'],
'd': ['d'],
'j': ['j'],
'l': ['l'],
'n': ['n'],
't͡ʃː': ['t͡ʃ'],
'ɣ': ['ɣ'],
'ɯ': ['i']}
assert adrc_obj_out.sedict == {'a<a': 6, 'd<d': 1, 'i<ɯ': 1, 'j<j': 1,
'l<l': 1, 'n<n': 1, 't͡ʃ<t͡ʃː': 1, 'ɣ<ɣ': 2}
for struc, exp_phonotactics in zip(adrc_obj_out.scdict_phonotactics, [
['VCCVC', 'VCVC', 'VCVCV'], ['VCVC', 'VCCVC', 'VCVCV']]):
assert set(adrc_obj_out.scdict_phonotactics[struc]) == set(
exp_phonotactics)
# test with mode=reconstruct
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="H",
target_language="EAH", mode="reconstruct")
# run function
adrc_obj_out = get_noncrossval_sc(adrc_obj, None)
# assert result
assert adrc_obj_out.scdict == {
'#-': ['-'],
'#a': [
'aː',
'ɒ'],
'-#': ['oz'],
'a': [
'uː',
'aː'],
'at͡ʃi#': ['-'],
'j': ['jn'],
'ld': ['ɟ'],
'n#': ['r'],
'ɣ': ['t͡ʃ'],
'ɣ#': ['-']}
assert adrc_obj_out.sedict == {
'#-<*-': 3,
'#a<*aː': 2,
'#a<*ɒ': 1,
'-#<*oz': 1,
'a<*aː': 1,
'a<*uː': 1,
'at͡ʃi#<*-': 1,
'j<*jn': 1,
'ld<*ɟ': 1,
'n#<*r': 1,
'ɣ#<*-': 1,
'ɣ<*t͡ʃ': 1}
assert adrc_obj_out.scdict_phonotactics == {}
# test with write=Path
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# set up path
path2noncrossval = Path(__file__).parent / "test_get_noncrossval_sc.txt"
# run function
adrc_obj_out = get_noncrossval_sc(adrc_obj, path2noncrossval)
# read and assert result
out = literal_eval(open(path2noncrossval, encoding="utf-8").read())
# phonotactic inventory has randomness
assert set(out.pop(3)) == {'VCVC', 'VCVCV', 'VCCVC'}
assert out == [{'a': ['a'], 'd': ['d'], 'j': ['j'], 'l': ['l'], 'n': ['n'],
't͡ʃː': ['t͡ʃ'], 'ɣ': ['ɣ'], 'ɯ': ['i']},
{'a<a': 6, 'd<d': 1, 'i<ɯ': 1, 'j<j': 1, 'l<l': 1, 'n<n': 1,
't͡ʃ<t͡ʃː': 1, 'ɣ<ɣ': 2},
{'a<a': [1, 2, 3], 'd<d': [2],
'i<ɯ': [1], 'j<j': [3], 'l<l': [2],
'n<n': [3], 't͡ʃ<t͡ʃː': [1], 'ɣ<ɣ': [1, 2]},
{'VCCVC<VCCVC': 1, 'VCVC<VCVC': 1, 'VCVCV<VCVCV': 1},
{'VCCVC<VCCVC': [2], 'VCVC<VCVC': [3], 'VCVCV<VCVCV': [1]}]
# tear down
remove(path2noncrossval)
del adrc_obj_out, adrc_obj, path2noncrossval
def test_get_crossval_data():
"""check if correct row is dropped from df for cross-validation"""
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# run function
# first cog isolated, missing sc: a ɣ a t͡ʃ i - a ɣ a t͡ʃː ɯ
adrc_obj_out = get_crossval_data(adrc_obj, 0, None)
# assert result
assert adrc_obj_out.scdict == {'a': ['a'], 'd': ['d'], 'j': ['j'],
'l': ['l'], 'n': ['n'], 'ɣ': ['ɣ']}
assert adrc_obj_out.sedict == {'a<a': 4, 'd<d': 1, 'j<j': 1, 'l<l': 1,
'n<n': 1, 'ɣ<ɣ': 1}
for struc, exp_phonotactics in zip(adrc_obj_out.scdict_phonotactics, [
['VCCVC', 'VCVC'], ['VCVC', 'VCCVC']]):
assert set(adrc_obj_out.scdict_phonotactics[struc]) == set(
exp_phonotactics)
# isolate different cogset
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# run function
    # second cog isolated, missing sc: aː l ɟ uː - a l d a w
adrc_obj_out = get_crossval_data(adrc_obj, 1, None)
# assert result
assert adrc_obj_out.scdict == {'a': ['a'], 'j': ['j'], 'n': ['n'],
't͡ʃː': ['t͡ʃ'], 'ɣ': ['ɣ'], 'ɯ': ['i']}
assert adrc_obj_out.sedict == {'a<a': 4, 'i<ɯ': 1, 'j<j': 1,
'n<n': 1, 't͡ʃ<t͡ʃː': 1, 'ɣ<ɣ': 1}
for struc, exp_phonotactics in zip(adrc_obj_out.scdict_phonotactics, [
['VCVC', 'VCVCV'], ['VCVC', 'VCVCV']]):
assert set(adrc_obj_out.scdict_phonotactics[struc]) == set(
exp_phonotactics)
# isolate yet another cogset
# set up
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# run function
    # third cog isolated, missing sc: a j a n - a j a n
adrc_obj_out = get_crossval_data(adrc_obj, 2, None)
# assert result
assert adrc_obj_out.scdict == {'a': ['a'], 'd': ['d'], 'l': ['l'],
't͡ʃː': ['t͡ʃ'], 'ɣ': ['ɣ'], 'ɯ': ['i']}
assert adrc_obj_out.sedict == {'a<a': 4, 'd<d': 1, 'i<ɯ': 1, 'l<l': 1,
't͡ʃ<t͡ʃː': 1, 'ɣ<ɣ': 2}
for struc, exp_phonotactics in zip(adrc_obj_out.scdict_phonotactics, [
['VCVCV', 'VCCVC'], ['VCVCV', 'VCCVC']]):
assert set(adrc_obj_out.scdict_phonotactics[struc]) == set(
exp_phonotactics)
# test with mode="reconstruct"
# set up
adrc_obj = Adrc(
forms_csv=PATH2FORMS,
source_language="H",
target_language="EAH",
mode="reconstruct")
# run function
    # third cog isolated, missing sc: a j a n - a j a n
adrc_obj_out = get_crossval_data(adrc_obj, 2, None)
# assert result
assert adrc_obj_out.scdict == {
'#-': ['-'],
'#a': ['aː'],
'a': ['uː'],
'at͡ʃi#': ['-'],
'ld': ['ɟ'],
'ɣ': ['t͡ʃ'],
'ɣ#': ['-']}
assert adrc_obj_out.sedict == {
'#-<*-': 2,
'#a<*aː': 2,
'a<*uː': 1,
'at͡ʃi#<*-': 1,
'ld<*ɟ': 1,
'ɣ#<*-': 1,
'ɣ<*t͡ʃ': 1}
assert adrc_obj_out.scdict_phonotactics == {}
# test with writesc = Path()
# set up
path2outfolder = Path(__file__).parent / \
"output_files" # has to be folder!
adrc_obj = Adrc(
forms_csv=PATH2FORMS,
source_language="H",
target_language="EAH",
mode="reconstruct")
# run function
    # third cog isolated, missing sc: a j a n - a j a n
adrc_obj_out = get_crossval_data(adrc_obj, 2, path2outfolder)
# assert result
assert adrc_obj_out.scdict == {
'#-': ['-'],
'#a': ['aː'],
'a': ['uː'],
'at͡ʃi#': ['-'],
'ld': ['ɟ'],
'ɣ': ['t͡ʃ'],
'ɣ#': ['-']}
assert adrc_obj_out.sedict == {
'#-<*-': 2,
'#a<*aː': 2,
'a<*uː': 1,
'at͡ʃi#<*-': 1,
'ld<*ɟ': 1,
'ɣ#<*-': 1,
'ɣ<*t͡ʃ': 1}
assert adrc_obj_out.scdict_phonotactics == {}
# assert file written correctly
assert literal_eval(open(path2outfolder / "sc2isolated.txt",
encoding="utf-8").read()) == [
{'#-': ['-'], '#a': ['aː'], 'a': ['uː'], 'at͡ʃi#': ['-'],
'ld': ['ɟ'], 'ɣ': ['t͡ʃ'], 'ɣ#': ['-']},
{'#-<*-': 2, '#a<*aː': 2, 'a<*uː': 1,
'at͡ʃi#<*-': 1, 'ld<*ɟ': 1, 'ɣ#<*-': 1, 'ɣ<*t͡ʃ': 1},
{'#-<*-': [1, 2], '#a<*aː': [1, 2], 'a<*uː': [2], 'at͡ʃi#<*-': [1],
'ld<*ɟ': [2], 'ɣ#<*-': [2], 'ɣ<*t͡ʃ': [1]}, {}, {}, {}]
# tear down
remove(path2outfolder / "sc2isolated.txt")
del adrc_obj_out, adrc_obj, path2outfolder
def test_loop_thru_data():
"""Is cross-validation called and loop run?"""
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH")
# assert output is correct
assert loop_thru_data(
adrc_obj, 1, 1, 100, 49, False, False, False, False, False, [
10, 50, 100, 500, 1000], 'adapt', False, True) == adrc_obj
# set up expected output
df_exp = DataFrame([
("aɣat͡ʃi", "aɣat͡ʃːɯ", 1, float("inf"), "KeyError"),
("aldaɣ", "aldaɣ", 2, float("inf"), "KeyError"),
("ajan", "ajan", 3, float("inf"), "KeyError")],
columns=['Target_Form', 'Source_Form',
'Cognacy', 'guesses', 'best_guess'])
# assert output.dfety is correct
assert_frame_equal(adrc_obj.dfety, df_exp)
# assert popped words were plugged back in consistently in loop
assert adrc_obj.forms_target_language == ["aɣat͡ʃi", "aldaɣ", "ajan"]
# tear down
del df_exp, adrc_obj
def test_getnse4df():
"""Is normalised sum of examples for data frame calculated correctly?"""
# test with mode="adapt"
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
out_adrc_obj = get_nse4df(adrc_obj, "Target_Form")
# assert output was correct
assert_frame_equal(out_adrc_obj.dfety, read_csv(
Path(__file__).parent / "expected_files" / "getnse4df_ad.csv"))
# test with mode="reconstruct"
adrc_obj = Adrc(
forms_csv=PATH2FORMS,
source_language="H",
target_language="EAH",
scdictlist=PATH2SC_RC,
mode="reconstruct")
out_adrc_obj = get_nse4df(adrc_obj, "Target_Form")
# assert output was correct
assert_frame_equal(out_adrc_obj.dfety, read_csv(
Path(__file__).parent / "expected_files" / "getnse4df_rc.csv"))
# tear down
del adrc_obj, out_adrc_obj
def test_phonotactics_predicted():
"""Correct boolean returned when checking if phonotactics was predicted?"""
adrc_obj = Adrc()
df_in = DataFrame({
"Target_Form": ["abc", "def", "ghi"],
"predicted_phonotactics": [["CCC", "VVV"], ["CVC"], ["CCV", "CCC"]]})
df_exp = df_in.assign(phonotactics_predicted=[False, True, True])
adrc_obj.dfety = df_in
assert_frame_equal(phonotactics_predicted(adrc_obj).dfety, df_exp)
# tear down
del adrc_obj, df_in, df_exp
def test_get_dist():
"""Are (normalised) Levenshtein Distances calculated correctly?"""
adrc_obj = Adrc()
# set up input
dfety = DataFrame({
"best_guess": ["will not buy", "record", "scratched"],
"Target_Form": ["won't buy", "tobacconists", "scratched"]})
adrc_obj.dfety = dfety
# set up expected outcome
df_exp = dfety.assign(
fast_levenshtein_distance_best_guess_Target_Form=[5, 10, 0],
fast_levenshtein_distance_div_maxlen_best_guess_Target_Form=[
0.42, 0.83, 0.00])
assert_frame_equal(get_dist(adrc_obj, "best_guess").dfety, df_exp)
# tear down
del adrc_obj, dfety, df_exp
def test_postprocess():
"""Is result of loanpy.sanity.loop_thru_data postprocessed correctly?"""
# test with mode="adapt"
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
# pretend guesses are already made
adrc_obj.dfety["best_guess"] = ["aɣa", "bla", "ajan"]
# run function with show_workflow=False
adrc_obj_out = postprocess(adrc_obj)
assert_frame_equal(adrc_obj_out.dfety, read_csv(
Path(__file__).parent / "expected_files" / "postprocess_ad.csv"))
# test with mode="reconstruct"
adrc_obj = Adrc(
forms_csv=PATH2FORMS,
source_language="H",
target_language="EAH",
scdictlist=PATH2SC_RC,
mode="reconstruct")
# pretend guesses are already made
adrc_obj.dfety["best_guess"] = ["aːt͡ʃ", "bla", "ɒjnaːr"]
# run function with show_workflow=False
adrc_obj_out = postprocess(adrc_obj)
rfile = read_csv(
Path(__file__).parent / "expected_files" / "postprocess_rc.csv")
for i in adrc_obj_out.dfety.columns:
print(adrc_obj_out.dfety[i])
for j in rfile:
print(rfile[j])
assert_frame_equal(adrc_obj_out.dfety, read_csv(
Path(__file__).parent / "expected_files" / "postprocess_rc.csv"))
# test with show_workflow
# test with mode="adapt"
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
# pretend guesses are already made
adrc_obj.dfety["best_guess"] = ["aɣa", "bla", "ajan"]
adrc_obj.dfety["predicted_phonotactics"] = [
"['VCV', 'CCC']", "['CCC', 'VCC']", "['VCVC']"]
# run function with show_workflow=False
adrc_obj_out = postprocess(adrc_obj)
assert_frame_equal(
adrc_obj_out.dfety,
read_csv(
Path(__file__).parent /
"expected_files" /
"postprocess_ad_workflow.csv"))
def test_make_stat():
pass # unittest = integrationtest, there was nothing to mock.
def test_gettprfpr():
pass # unittest = integrationtest, there was nothing to mock.
def test_plot_roc():
"""Is result plotted correctly to .jpg? Check result manually at
output_files / "mockplot.jpg!"""
path2mockplot = Path(__file__).parent / "output_files" / "mockplot.jpg"
plot_roc(guesslist=[1, 2, 3],
plot_to=path2mockplot, tpr_fpr_opt=(
[0.0, 0.0, 0.1, 0.1, 0.3, 0.6, 0.7],
[0.001, 0.003, 0.005, 0.007, 0.009, 0.099, 1.0],
(0.501, 0.6, 0.099)), opt_howmany=1,
opt_tpr=0.6, len_df=3, mode="adapt")
# verify manually that results in output_files and expected_files are same
def test_postprocess2():
"""Is result of loanpy.sanity.postprocess postprocessed correctly?"""
adrc_obj = Adrc(forms_csv=PATH2FORMS, source_language="WOT",
target_language="EAH", scdictlist=PATH2SC_AD)
# pretend guesses are already made
adrc_obj.dfety["guesses"] = [1, 2, 3]
assert postprocess2(adrc_obj, [4, 5, 6], "adapt") == (5, '3/3', '100%')
# define path for output
path2out = Path(__file__).parent / "postprocess2_integration.csv"
# assert write_to works
assert postprocess2(adrc_obj, [4, 5, 6], "adapt", path2out
) == (5, '3/3', '100%')
# since guesses were manually inserted!
assert_frame_equal( | read_csv(path2out) | pandas.read_csv |
import pandas as pd
import os
import re
import pprint
import shutil
# Clean all the obvious typos
corrections ={'BAUGHWJV':'BAUGHMAN',
'BOHNE':'BOEHNE',
'EISEMENGER':'EISENMENGER',
'GEITHER':'GEITHNER',
'KIMBREL':'KIMEREL',
'MATTINGLY': 'MATTLINGLY',
'FORESTALL':'FORRESTAL',
'GRENSPAN':'GREENSPAN',
'GREESPAN':'GREENSPAN',
'GREENPSAN':'GREENSPAN',
'GREENSPAN,':'GREENSPAN',
'GREENPAN':'GREENSPAN',
'McANDREWS':'MCANDREWS',
'MCDONUGH':'MCDONOUGH',
'MOSCOW':'MOSKOW',
'MONHOLLAN':'MONHOLLON',
'MILIER':'MILLER',
'MILER':'MILLER',
'SCWLTZ':'SCHULTZ',
'SCHELD':'SCHIELD',
'WILLZAMS':'WILLIAMS',
'WALLJCH':'WALLICH',
'VOLCKFR':'VOLCKER',
              'VOLCRER':'VOLCKER',
'ALLISON for':'ALLISON',
'ALTMA"':'ALTMANN',
'B A U G W':'BAUGW',
'BIES (as read by Ms':'BIES',
'BLACK &':'BLACK',
'MAYO/MR':'MAYO',
'Greene':"GREENE",
'CROSS,':'CROSS',
'GREENSPAN,':'GREENSPAN',
'HOSKINS,':'HOSKINS',
'MACCLAURY':'MACLAURY',
'MORRRIS':'MORRIS',
"O'CONNELL":'O’CONNELL',
'SOLOMON]':'SOLOMON',
'TRUMAN-':'TRUMAN',
'VOLCKER,':'VOLCKER',
'VOLKER,':'VOLCKER',
'WALLlCH':'WALLICH',
'[BALLES]':'BALLES',
'[GARDNER]':'GARDNER',
'[KICHLINE]?':'KICHLINE',
'[PARDEE]':'PARDEE',
'[ROOS]':'ROOS',
'[STERN':'STERN',
'[WILLES]':'WILLES',
'ŞAHIN':'SAHIN',
'[STERN(?)':'STERN',
'[STERN]':'STERN',
'GRALEY':'GRAMLEY',
'ALTMA”':'ALTMANN'}
def name_corr(val):
sentence=""
dictkeys=[key for key, value in corrections.items()]
if val in dictkeys:
val = corrections[val]
else:
if re.match(".*\(\?\)",val):
val = re.search("(.*)(\(\?\))",val)[1]
if val in dictkeys:
val = corrections[val]
if len(val.split(" "))>1:
#print(val.split(" ")[0])
#print(val.split(" ")[1:])
sentencehelp = " ".join(val.split(" ")[1:])
if not len(re.findall("Yes",sentencehelp))>7:
if len(sentencehelp)>10:
sentence = sentencehelp
#print(sentence)
val = val.split(" ")[0]
if val in dictkeys:
val = corrections[val]
#print(val)
return val,sentence
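# Illustrative behaviour of name_corr (hypothetical inputs, derived from the
# mapping above, not part of the original script):
#   name_corr("GRENSPAN")  -> ("GREENSPAN", "")   # direct dictionary hit
#   name_corr("VOLCKER,")  -> ("VOLCKER", "")     # punctuation variant
#   name_corr("STERN(?)")  -> ("STERN", "")       # "(?)" suffix stripped first
# If the string after the surname is longer than 10 characters and is not a run
# of "Yes" votes, it is returned as `sentence` so the spoken text is kept.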
def get_interjections():
base_directory = base_directory = "../../../collection/python/data/transcript_raw_text"
raw_doc = os.listdir(base_directory)
filelist = sorted(raw_doc)
documents = []
if os.path.exists("../output/speaker_data"):
shutil.rmtree("../output/speaker_data")
os.mkdir("../output/speaker_data")
for doc_path in filelist:
with open("{}/{}".format(base_directory,doc_path),'r') as f:
            documents.append(re.sub(r"\s\s+", " ", f.read().replace("\n", " ").replace(":", ".")))
date = pd.Series(data=filelist).apply(lambda x: x[0:10])
#print(date)
parsed_text = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
"""Tests for `arcos_py` package."""
from numpy import int64
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from arcos4py import ARCOS
from arcos4py.tools._errors import noDataError
@pytest.fixture
def no_bin_data():
"""
pytest fixture to generate test data
"""
data = [item for i in range(10) for item in list(range(1, 11))]
m = [0 for i in range(100)]
d = {'id': data, 'time': data, 'm': m, 'x': data}
print(d)
df = pd.DataFrame(d)
return df
def test_empty_data(no_bin_data: pd.DataFrame):
with pytest.raises(noDataError, match='Input is empty'):
test_data = no_bin_data[no_bin_data['m'] > 0]
pos = ['x']
ts = ARCOS(
test_data, posCols=pos, frame_column='time', id_column='id', measurement_column='m', clid_column='clTrackID'
)
ts.trackCollev(eps=1, minClsz=1, nPrev=2)
def test_1_central_1_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_2_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_3D():
df_in = pd.read_csv('tests/testdata/1central3D_in.csv')
df_true = pd.read_csv('tests/testdata/1central3D_res.csv')
pos = ['x', 'y', 'z']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x', 'y', 'z'])
assert_frame_equal(out, df_true)
def test_1_central_growing():
df_in = pd.read_csv('tests/testdata/1centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/1centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_central_growing():
df_in = pd.read_csv('tests/testdata/2centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/2centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_symmetric():
df_in = pd.read_csv('tests/testdata/2with1commonSym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonSym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_asymmetric():
df_in = pd.read_csv('tests/testdata/2with1commonAsym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonAsym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_1_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_2_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_1_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_2_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_6_overlapping():
df_in = pd.read_csv('tests/testdata/6overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/6overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
out['trackID'] = out['trackID'].astype(int64)
assert_frame_equal(out, df_true)
def test_split_from_single():
df_in = pd.read_csv('tests/testdata/1objSplit_in.csv')
df_true = | pd.read_csv('tests/testdata/1objSplit_res.csv') | pandas.read_csv |
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from sys import byteorder
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_sequence,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_indexing_slices_equivalent,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_metadata_equivalent,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import ( # noqa:F401
get_dtype,
get_obj,
)
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.api import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.arrays import (
BaseMaskedArray,
ExtensionArray,
PandasArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NARROW_NP_DTYPES = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
ENDIAN = {"little": "<", "big": ">"}[byteorder]
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
NP_NAT_OBJECTS = [
cls("NaT", unit)
for cls in [np.datetime64, np.timedelta64]
for unit in [
"Y",
"M",
"W",
"D",
"h",
"m",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
"as",
]
]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index._with_infer(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is np.ndarray or box_cls is np.array:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
"""
Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
"""
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if dtype is None:
return np.asarray(obj)
return extract_array(obj, extract_numpy=True)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
"""make a length k index or n categories"""
x = rands_array(nchars=4, size=n, replace=False)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
"""make a length k IntervalIndex"""
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if is_integer_dtype(dtype):
values = np.arange(k, dtype=dtype)
if is_unsigned_integer_dtype(dtype):
values += 2 ** (dtype.itemsize * 8 - 1)
elif is_float_dtype(dtype):
values = np.random.random_sample(k) - np.random.random_sample(1)
values.sort()
values = values * (10 ** np.random.randint(0, 9))
else:
raise NotImplementedError(f"wrong dtype {dtype}")
return NumericIndex(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="int64")
return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="uint64")
return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="float64")
return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
N = (k // 2) + 1
rng = range(N)
mi = MultiIndex.from_product([("foo", "bar"), rng], names=names, **kwargs)
assert len(mi) >= k # GH#38795
return mi[:k]
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def make_rand_series(name=None, dtype=np.float64):
index = makeStringIndex(_N)
data = np.random.randn(_N)
data = data.astype(dtype, copy=False)
return Series(data, index=index, name=name)
def makeFloatSeries(name=None):
return make_rand_series(name=name)
def makeStringSeries(name=None):
return make_rand_series(name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
# and Generic Alias Type.
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
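# Illustrative calls (sketch only, outputs abbreviated):
#   makeCustomIndex(6, nlevels=1)                 # Index(['#_l0_g0', ..., '#_l0_g5'])
#   makeCustomIndex(4, nlevels=2, names=True)     # MultiIndex with level names '#0', '#1'
#   makeCustomIndex(5, nlevels=1, idx_type="dt")  # DatetimeIndex of 5 business days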
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
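# Sketch of how the decorator is meant to be used (function and paths are
# hypothetical, not part of this module):
#
#   @test_parallel(num_threads=4)
#   def append_rows(df, path):
#       df.to_csv(path, mode="a", header=False)
#
#   append_rows(df, "out.csv")  # executes the body in 4 threads at once
#
# Since the return value of the decorated function is discarded, the decorator
# is only useful for exercising thread-safety, not for checking results.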
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
# For testing, those properties return a generic callable, and not
# the actual class. In this case that is equivalent, but it is to
# ensure we don't rely on the property returning a class
# See https://github.com/pandas-dev/pandas/pull/46018 and
# https://github.com/pandas-dev/pandas/issues/32638 and linked issues
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
@property
def _constructor_expanddim(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
@property
def _constructor_sliced(self):
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = | pd.core.common._cython_table.items() | pandas.core.common._cython_table.items |
import os
import pandas as pd
"""Remove a directory tree"""
def remove_callback_dir(directory):
from shutil import rmtree
to_remove = os.path.join('tcc', directory)
if os.path.exists(to_remove):
rmtree(to_remove)
"""Create a directory tree"""
def make_callback_dir(directory, i):
full_dir = os.path.join('tcc', directory, str(i))
if not os.path.exists(full_dir):
os.makedirs(full_dir)
return full_dir
"""Save array data in csvfile with ids=ids"""
def save_array(ids, array, csvfile):
labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
| pd.DataFrame(array, index=ids, columns=labels) | pandas.DataFrame |
import unittest
import pandas as pd
from mmvec.heatmap import (
_parse_taxonomy_strings, _parse_heatmap_metadata_annotations,
_process_microbe_metadata, _process_metabolite_metadata,
_normalize_table)
import pandas.util.testing as pdt
class TestParseTaxonomyStrings(unittest.TestCase):
def setUp(self):
self.taxa = pd.Series([
'k__Bacteria; p__Proteobacteria; c__Deltaproteobacteria; '
'o__Desulfobacterales; f__Desulfobulbaceae; g__; s__',
'k__Bacteria; p__Cyanobacteria; c__Chloroplast; o__Streptophyta',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Lardizabala; s__biternata',
'k__Archaea; p__Euryarchaeota; c__Methanomicrobia; '
'o__Methanosarcinales; f__Methanosarcinaceae; g__Methanosarcina',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Pavlova; s__lutheri',
'k__Archaea; p__[Parvarchaeota]; c__[Parvarchaea]; o__WCHD3-30',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Sphingomonadales; f__Sphingomonadaceae'],
index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
name='Taxon')
self.exp = pd.Series(
['s__', 'o__Streptophyta', 's__biternata', 'g__Methanosarcina',
's__lutheri', 'o__WCHD3-30', 'f__Sphingomonadaceae'],
index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
name='Taxon')
def test_parse_taxonomy_strings(self):
exp = pd.Series(['p__Proteobacteria', 'p__Cyanobacteria',
'p__Proteobacteria', 'p__Euryarchaeota',
'p__Proteobacteria', 'p__[Parvarchaeota]',
'p__Proteobacteria'],
index=pd.Index([c for c in 'ABCDEFG'],
name='feature-id'), name='Taxon')
obs = _parse_taxonomy_strings(self.taxa, level=2)
pdt.assert_series_equal(exp, obs)
def test_parse_taxonomy_strings_baserank(self):
exp = pd.Series(['k__Bacteria', 'k__Bacteria', 'k__Bacteria',
'k__Archaea', 'k__Bacteria', 'k__Archaea',
'k__Bacteria'],
index=pd.Index([c for c in 'ABCDEFG'],
name='feature-id'), name='Taxon')
obs = _parse_taxonomy_strings(self.taxa, level=1)
pdt.assert_series_equal(exp, obs)
def test_parse_taxonomy_strings_toprank(self):
# expect top rank even if level is higher than depth of top rank
obs = _parse_taxonomy_strings(self.taxa, level=7)
pdt.assert_series_equal(self.exp, obs)
def test_parse_taxonomy_strings_rank_out_of_range_is_top(self):
# expect top rank even if level is higher than depth of top rank
obs = _parse_taxonomy_strings(self.taxa, level=9)
pdt.assert_series_equal(self.exp, obs)
class TestHeatmapAnnotation(unittest.TestCase):
def setUp(self):
self.taxonomy = pd.Series(
['k__Bacteria', 'k__Archaea', 'k__Bacteria', 'k__Archaea'],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
def test_parse_heatmap_metadata_annotations_colorhelix(self):
exp_cols = pd.Series(
[[0.8377187772618228, 0.7593149036488329, 0.9153517040128891],
[0.2539759281991313, 0.3490084835469758, 0.14482988411775732],
[0.8377187772618228, 0.7593149036488329, 0.9153517040128891],
[0.2539759281991313, 0.3490084835469758, 0.14482988411775732]],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
exp_classes = {'k__Archaea': [0.2539759281991313, 0.3490084835469758,
0.14482988411775732],
'k__Bacteria': [0.8377187772618228, 0.7593149036488329,
0.9153517040128891]}
cols, classes = _parse_heatmap_metadata_annotations(
self.taxonomy, 'colorhelix')
pdt.assert_series_equal(exp_cols, cols)
self.assertDictEqual(exp_classes, classes)
def test_parse_heatmap_metadata_annotations_magma(self):
exp_cols = pd.Series(
[(0.944006, 0.377643, 0.365136), (0.445163, 0.122724, 0.506901),
(0.944006, 0.377643, 0.365136), (0.445163, 0.122724, 0.506901)],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
exp_classes = {'k__Archaea': (0.445163, 0.122724, 0.506901),
'k__Bacteria': (0.944006, 0.377643, 0.365136)}
cols, classes = _parse_heatmap_metadata_annotations(
self.taxonomy, 'magma')
pdt.assert_series_equal(exp_cols, cols)
self.assertDictEqual(exp_classes, classes)
class TestMetadataProcessing(unittest.TestCase):
def setUp(self):
self.taxonomy = pd.Series(
['k__Bacteria', 'k__Archaea', 'k__Bacteria'],
index=pd.Index([c for c in 'ABC']), name='Taxon')
self.metabolites = pd.Series([
'amino acid', 'carbohydrate', 'drug metabolism'],
index=pd.Index(['a', 'b', 'c']), name='Super Pathway')
self.ranks = pd.DataFrame(
[[4, 1, 2, 3], [1, 2, 1, 2], [2, 4, 3, 1], [6, 4, 2, 3]],
index=pd.Index([c for c in 'ABCD']), columns=[c for c in 'abcd'])
# test that metadata processing works, filters ranks, and works in sequence
def test_process_metadata(self):
# filter on taxonomy, taxonomy parser/annotation tested above
with self.assertWarnsRegex(UserWarning, "microbe IDs are present"):
res = _process_microbe_metadata(
self.ranks, self.taxonomy, -1, 'magma')
ranks_filtered = pd.DataFrame(
[[4, 1, 2, 3], [1, 2, 1, 2], [2, 4, 3, 1]],
index=pd.Index([c for c in 'ABC']), columns=[c for c in 'abcd'])
pdt.assert_frame_equal(ranks_filtered, res[1])
# filter on metabolites, annotation tested above
with self.assertWarnsRegex(UserWarning, "metabolite IDs are present"):
res = _process_metabolite_metadata(
ranks_filtered, self.metabolites, 'magma')
ranks_filtered = ranks_filtered[[c for c in 'abc']]
pdt.assert_frame_equal(ranks_filtered, res[1])
class TestNormalize(unittest.TestCase):
def setUp(self):
self.tab = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 3]})
def test_normalize_table_log10(self):
res = _normalize_table(self.tab, 'log10')
exp = pd.DataFrame(
{'a': {0: 0.3010299956639812, 1: 0.47712125471966244,
2: 0.6020599913279624},
'b': {0: 0.6020599913279624, 1: 0.6989700043360189,
2: 0.6020599913279624}})
pdt.assert_frame_equal(res, exp)
def test_normalize_table_z_score_col(self):
res = _normalize_table(self.tab, 'z_score_col')
exp = pd.DataFrame({'a': {0: -1.0, 1: 0.0, 2: 1.0},
'b': {0: -0.577350269189626, 1: 1.154700538379251,
2: -0.577350269189626}})
pdt.assert_frame_equal(res, exp)
def test_normalize_table_rel_col(self):
res = _normalize_table(self.tab, 'rel_col')
exp = pd.DataFrame({'a': {0: 0.16666666666666666,
1: 0.3333333333333333, 2: 0.5},
'b': {0: 0.3, 1: 0.4, 2: 0.3}})
| pdt.assert_frame_equal(res, exp) | pandas.util.testing.assert_frame_equal |
# Copyright (c) 2020 <NAME>
import sklearn.metrics
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
import scipy.sparse
import scipy.io
import scipy.special
import types
import json
import warnings
import math
import torch.nn.functional as F
import csv
from pynvml import *
from contextlib import redirect_stdout
from sparsechem import censored_mse_loss_numpy
from collections import namedtuple
from scipy.sparse import csr_matrix
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
class Nothing(object):
def __getattr__(self, name):
return Nothing()
def __call__(self, *args, **kwargs):
return Nothing()
def __repr__(self):
return "Nothing"
# Control downsampling: how many scalar data do we keep for each run/tag
# combination?
SIZE_GUIDANCE = {'scalars': 10000}
def extract_scalars(multiplexer, run, tag):
"""Extract tabular data from the scalars at a given run and tag.
The result is a list of 3-tuples (wall_time, step, value).
"""
tensor_events = multiplexer.Tensors(run, tag)
return [
# (event.wall_time, event.step, tf.make_ndarray(event.tensor_proto).item())
(event.wall_time, event.step, event.tensor_proto.float_val[0])
for event in tensor_events
]
def create_multiplexer(logdir):
multiplexer = event_multiplexer.EventMultiplexer(
tensor_size_guidance=SIZE_GUIDANCE)
multiplexer.AddRunsFromDirectory(logdir)
multiplexer.Reload()
return multiplexer
def export_scalars(multiplexer, run, tag, filepath, write_headers=True):
data = extract_scalars(multiplexer, run, tag)
with open(filepath, 'w') as outfile:
writer = csv.writer(outfile)
if write_headers:
writer.writerow(('wall_time', 'step', 'value'))
for row in data:
writer.writerow(row)
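# Typical usage sketch (logdir, run and tag names are placeholders):
#
#   multiplexer = create_multiplexer("boards/")          # scans every run under logdir
#   export_scalars(multiplexer, "run0", "va/auc_pr", "run0_aucpr.csv")
#
# which writes one (wall_time, step, value) row per logged scalar event.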
def return_max_val(data):
max_val = 0
for row in data:
if row[2] > max_val:
max_val = row[2]
return max_val
def inverse_normalization(yr_hat_all, mean, variance, dev="cpu", array=False):
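    # Reverses the column-wise standardisation from normalize_regr(): each column
    # is rescaled by sqrt(variance) and shifted by its mean.  The dense branch
    # (array=False) operates on torch tensors on `dev`; the sparse branch
    # (array=True) operates on scipy sparse matrices and only touches the
    # observed (non-zero) entries.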
if array==False:
stdev = np.sqrt(variance)
diagstdev = scipy.sparse.diags(np.array(stdev)[0],0)
diag = torch.from_numpy(diagstdev.todense())
y_inv_norm = torch.matmul(yr_hat_all, diag.to(torch.float32).to(dev))
diagm = scipy.sparse.diags(mean, 0)
y_mask = np.ones(yr_hat_all.shape)
y_inv_norm = y_inv_norm + torch.from_numpy(y_mask * diagm).to(torch.float32).to(dev)
else:
y_mask = yr_hat_all.copy()
y_mask.data = np.ones_like(y_mask.data)
set_mask = set([(i,j) for i,j in zip(y_mask.nonzero()[0], y_mask.nonzero()[1])])
stdev = np.sqrt(variance)
diagstdev = scipy.sparse.diags(stdev,0)
y_inv_norm = yr_hat_all.multiply(y_mask * diagstdev)
diagm = scipy.sparse.diags(mean, 0)
y_inv_norm = y_inv_norm + y_mask * diagm
set_inv_norm = set([(i,j) for i,j in zip(y_inv_norm.nonzero()[0], y_inv_norm.nonzero()[1])])
set_delta = set_mask - set_inv_norm
for delta in set_delta:
y_inv_norm[delta[0],delta[1]]=0
assert yr_hat_all.shape == y_inv_norm.shape, "Shapes of yr_hat_all and y_inv_norm must be equal."
y_inv_norm.sort_indices()
assert (yr_hat_all.indptr == y_inv_norm.indptr).all(), "yr_hat_all and y_inv_norm must have the same .indptr"
assert (yr_hat_all.indices == y_inv_norm.indices).all(), "yr_hat_all and y_inv_norm must have the same .indices"
return y_inv_norm
def normalize_regr(y_regr, mean=None, std=None):
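    # Column-wise standardisation of a sparse label matrix: per task (column) the
    # mean and variance are computed over the observed (non-zero) entries only,
    # and each observed entry is replaced by (value - mean) / std.  Entries that
    # become exactly zero after centring are written back explicitly so the
    # sparsity pattern of the output matches the input.  Returns the normalised
    # matrix plus the per-column means and variances, which
    # inverse_normalization() uses to map predictions back to the original scale.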
y_regr_64 = scipy.sparse.csc_matrix(y_regr, dtype=np.float64)
tot = np.array(y_regr_64.sum(axis=0).squeeze())[0]
set_regr = set([(i,j) for i,j in zip(y_regr_64.nonzero()[0], y_regr_64.nonzero()[1])])
N = y_regr_64.getnnz(axis=0)
m = tot/N
diagm = scipy.sparse.diags(m, 0)
y_mask = y_regr_64.copy()
y_mask.data = np.ones_like(y_mask.data)
y_normalized = y_regr_64 - y_mask * diagm
set_norm = set([(i,j) for i,j in zip(y_normalized.nonzero()[0], y_normalized.nonzero()[1])])
set_delta = set_regr - set_norm
sqr = y_regr_64.copy()
sqr.data **= 2
msquared = np.square(m)
variance = sqr.sum(axis=0)/N - msquared
stdev_inv = 1/np.sqrt(variance)
diagstdev_inv = scipy.sparse.diags(np.array(stdev_inv)[0],0)
y_normalized = y_normalized.multiply(y_mask * diagstdev_inv)
for delta in set_delta:
y_normalized[delta[0],delta[1]]=0
assert y_regr_64.shape == y_normalized.shape, "Shapes of y_regr and y_normalized must be equal."
y_normalized.sort_indices()
assert (y_regr_64.indptr == y_normalized.indptr).all(), "y_regr and y_normalized must have the same .indptr"
assert (y_regr_64.indices == y_normalized.indices).all(), "y_regr and y_normalized must have the same .indptr"
return y_normalized, m, variance
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def calc_acc_kappa(recall, fpr, num_pos, num_neg):
"""Calculates accuracy from recall and precision."""
num_all = num_neg + num_pos
    tp = np.round(recall * num_pos).astype(int)
    fn = num_pos - tp
    fp = np.round(fpr * num_neg).astype(int)
tn = num_neg - fp
acc = (tp + tn) / num_all
pexp = num_pos / num_all * (tp + fp) / num_all + num_neg / num_all * (tn + fn) / num_all
kappa = (acc - pexp) / (1 - pexp)
return acc, kappa
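# Worked example: with num_pos=10, num_neg=90, recall=0.8 and fpr=0.1 we get
# tp=8, fn=2, fp=9, tn=81, hence acc = (8+81)/100 = 0.89,
# pexp = 0.1*0.17 + 0.9*0.83 = 0.764 and kappa = (0.89-0.764)/(1-0.764) ≈ 0.534.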
def all_metrics(y_true, y_score, cal_fact_aucpr_task):
"""Compute classification metrics.
Args:
y_true true labels (0 / 1)
y_score logit values
"""
if len(y_true) <= 1 or (y_true[0] == y_true).all():
df = pd.DataFrame({"roc_auc_score": [np.nan], "auc_pr": [np.nan], "avg_prec_score": [np.nan], "f1_max": [np.nan], "p_f1_max": [np.nan], "kappa": [np.nan], "kappa_max": [np.nan], "p_kappa_max": [np.nan], "bceloss": [np.nan], "auc_pr_cal": [np.nan]})
return df
fpr, tpr, tpr_thresholds = sklearn.metrics.roc_curve(y_true=y_true, y_score=y_score)
roc_auc_score = sklearn.metrics.auc(x=fpr, y=tpr)
precision, recall, pr_thresholds = sklearn.metrics.precision_recall_curve(y_true = y_true, probas_pred = y_score)
with np.errstate(divide='ignore'):
#precision can be zero but can be ignored so disable warnings (divide by 0)
precision_cal = 1/(((1/precision - 1)*cal_fact_aucpr_task)+1)
bceloss = F.binary_cross_entropy_with_logits(
input = torch.FloatTensor(y_score),
target = torch.FloatTensor(y_true),
reduction="none").mean().item()
## calculating F1 for all cutoffs
F1_score = np.zeros(len(precision))
mask = precision > 0
F1_score[mask] = 2 * (precision[mask] * recall[mask]) / (precision[mask] + recall[mask])
f1_max_idx = F1_score.argmax()
f1_max = F1_score[f1_max_idx]
p_f1_max = scipy.special.expit(pr_thresholds[f1_max_idx])
auc_pr = sklearn.metrics.auc(x = recall, y = precision)
auc_pr_cal = sklearn.metrics.auc(x = recall, y = precision_cal)
avg_prec_score = sklearn.metrics.average_precision_score(
y_true = y_true,
y_score = y_score)
y_classes = np.where(y_score >= 0.0, 1, 0)
## accuracy for all thresholds
acc, kappas = calc_acc_kappa(recall=tpr, fpr=fpr, num_pos=(y_true==1).sum(), num_neg=(y_true==0).sum())
kappa_max_idx = kappas.argmax()
kappa_max = kappas[kappa_max_idx]
p_kappa_max = scipy.special.expit(tpr_thresholds[kappa_max_idx])
kappa = sklearn.metrics.cohen_kappa_score(y_true, y_classes)
df = pd.DataFrame({"roc_auc_score": [roc_auc_score], "auc_pr": [auc_pr], "avg_prec_score": [avg_prec_score], "f1_max": [f1_max], "p_f1_max": [p_f1_max], "kappa": [kappa], "kappa_max": [kappa_max], "p_kappa_max": p_kappa_max, "bceloss": bceloss, "auc_pr_cal": [auc_pr_cal]})
return df
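# Minimal usage sketch (toy values; cal_fact_aucpr_task=1.0 disables calibration):
#
#   y_true  = np.array([1, 0, 1, 1, 0])
#   y_score = np.array([2.3, -1.2, 0.4, 1.8, -0.3])   # raw logits
#   all_metrics(y_true, y_score, cal_fact_aucpr_task=1.0)
#
# returns a single-row DataFrame; inputs with fewer than two observations or only
# one class present yield a row of NaNs instead.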
def compute_corr(x, y):
if len(y) <= 1:
return np.nan
ystd = y.std()
xstd = x.std()
if ystd == 0 or xstd == 0:
return np.nan
return np.dot((x - x.mean()), (y - y.mean())) / len(y) / y.std() / x.std()
def all_metrics_regr(y_true, y_score, y_censor=None):
if len(y_true) <= 1:
df = pd.DataFrame({"rmse": [np.nan], "rmse_uncen": [np.nan], "rsquared": [np.nan], "corrcoef": [np.nan]})
return df
## censor0 means non-censored observations
censor0 = y_censor == 0 if y_censor is not None else slice(None)
mse_cen = censored_mse_loss_numpy(target=y_true, input=y_score, censor=y_censor).mean()
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
mse = ((y_true[censor0] - y_score[censor0])**2).mean()
yvar = y_true[censor0].var()
if yvar == 0 or np.isnan(yvar):
rsquared = np.nan
corr = np.nan
else:
rsquared = 1 - mse / yvar
corr = compute_corr(y_true[censor0], y_score[censor0])
df = pd.DataFrame({
"rmse": [np.sqrt(mse_cen)],
"rmse_uncen": [np.sqrt(mse)],
"rsquared": [rsquared],
"corrcoef": [corr],
})
return df
def compute_metrics(cols, y_true, y_score, num_tasks, cal_fact_aucpr):
if len(cols) < 1:
return pd.DataFrame({
"roc_auc_score": np.nan,
"auc_pr": np.nan,
"avg_prec_score": np.nan,
"f1_max": np.nan,
"p_f1_max": np.nan,
"kappa": np.nan,
"kappa_max": np.nan,
"p_kappa_max": np.nan,
"bceloss": np.nan}, index=np.arange(num_tasks))
df = pd.DataFrame({"task": cols, "y_true": y_true, "y_score": y_score})
if hasattr(cal_fact_aucpr, "__len__"):
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics(
y_true = g.y_true.values,
y_score = g.y_score.values,
cal_fact_aucpr_task = cal_fact_aucpr[g['task'].values[0]]))
else:
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics(
y_true = g.y_true.values,
y_score = g.y_score.values,
cal_fact_aucpr_task = 1.0))
metrics.reset_index(level=-1, drop=True, inplace=True)
return metrics.reindex(np.arange(num_tasks))
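# Illustrative sketch (not part of the original module): `cols` carries the task index of
# every observation in the flattened (task, label, score) representation. With two observed
# tasks out of three, the returned frame has a NaN row for the unobserved task 2.
def _example_compute_metrics():
    cols = np.array([0, 0, 1, 1])
    y_true = np.array([0, 1, 1, 0])
    y_score = np.array([-0.5, 1.2, 0.3, -0.9])
    return compute_metrics(cols, y_true=y_true, y_score=y_score,
                           num_tasks=3, cal_fact_aucpr=1.0)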
def compute_metrics_regr(cols, y_true, y_score, num_tasks, y_censor=None):
"""Returns metrics for regression tasks."""
if len(cols) < 1:
return pd.DataFrame({
"rmse": np.nan,
"rmse_uncen": np.nan,
"rsquared": np.nan,
"corrcoef": np.nan,
},
index=np.arange(num_tasks))
df = pd.DataFrame({
"task": cols,
"y_true": y_true,
"y_score": y_score,
"y_censor": y_censor,
})
metrics = df.groupby("task", sort=True).apply(lambda g:
all_metrics_regr(
y_true = g.y_true.values,
y_score = g.y_score.values,
y_censor = g.y_censor.values if y_censor is not None else None))
metrics.reset_index(level=-1, drop=True, inplace=True)
return metrics.reindex(np.arange(num_tasks))
def class_fold_counts(y_class, folding):
folds = np.unique(folding)
num_pos = []
num_neg = []
for fold in folds:
yf = y_class[folding == fold]
num_pos.append( np.array((yf == +1).sum(0)).flatten() )
num_neg.append( np.array((yf == -1).sum(0)).flatten() )
return np.row_stack(num_pos), np.row_stack(num_neg)
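# Illustrative sketch (not part of the original module): rows are compounds, columns are
# tasks, labels are +1/-1 with 0 meaning "not measured"; `folding` assigns rows to folds.
def _example_class_fold_counts():
    y_class = np.array([[+1, -1], [0, +1], [-1, -1], [+1, 0]])
    folding = np.array([0, 0, 1, 1])
    num_pos, num_neg = class_fold_counts(y_class, folding)
    # num_pos == [[1, 1], [1, 0]] and num_neg == [[0, 1], [1, 1]] (fold x task)
    return num_pos, num_neg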
def print_metrics(epoch, train_time, metrics_tr, metrics_va, header):
if metrics_tr is None:
if header:
print("Epoch\tlogl_va | auc_va | aucpr_va | aucpr_cal_va | maxf1_va | tr_time")
output_fstr = (
f"{epoch}.\t{metrics_va['logloss']:.5f}"
f" | {metrics_va['roc_auc_score']:.5f}"
f" | {metrics_va['auc_pr']:.5f}"
f" | {metrics_va['auc_pr_cal']:.5f}"
f" | {metrics_va['f1_max']:.5f}"
f" | {train_time:6.1f}"
)
print(output_fstr)
return
## full print
if header:
print("Epoch\tlogl_tr logl_va | auc_tr auc_va | aucpr_tr aucpr_va | maxf1_tr maxf1_va | tr_time")
output_fstr = (
f"{epoch}.\t{metrics_tr['logloss']:.5f} {metrics_va['logloss']:.5f}"
f" | {metrics_tr['roc_auc_score']:.5f} {metrics_va['roc_auc_score']:.5f}"
f" | {metrics_tr['auc_pr']:.5f} {metrics_va['auc_pr']:.5f}"
f" | {metrics_tr['f1_max']:.5f} {metrics_va['f1_max']:.5f}"
f" | {train_time:6.1f}"
)
print(output_fstr)
def print_table(formats, data):
for key, fmt in formats.items():
print(fmt.format(data[key]), end="")
Column = namedtuple("Column", "key size dec title")
columns_cr = [
Column("epoch", size=6, dec= 0, title="Epoch"),
Column(None, size=1, dec=-1, title="|"),
Column("logloss", size=8, dec= 5, title="logl"),
Column("bceloss", size=8, dec= 5, title="bceloss"),
Column("roc_auc_score", size=8, dec= 5, title="aucroc"),
Column("auc_pr", size=8, dec= 5, title="aucpr"),
Column("auc_pr_cal", size=9, dec= 5, title="aucpr_cal"),
Column("f1_max", size=8, dec= 5, title="f1_max"),
Column(None, size=1, dec=-1, title="|"),
Column("rmse", size=9, dec= 5, title="rmse"),
Column("rsquared", size=9, dec= 5, title="rsquared"),
Column("corrcoef", size=9, dec= 5, title="corrcoef"),
Column(None, size=1, dec=-1, title="|"),
Column("train_time", size=6, dec= 1, title="tr_time"),
]
def print_cell(value, size, dec, left, end=" "):
align = "<" if left else ">"
if type(value) == str:
print(("{:" + align + str(size) + "}").format(value), end=end)
else:
print(("{:" + align + str(size) + "." + str(dec) + "f}").format(value), end=end)
def print_metrics_cr(epoch, train_time, results_tr, results_va, header):
data = pd.concat([results_va["classification_agg"], results_va["regression_agg"]])
data["train_time"] = train_time
data["epoch"] = epoch
if header:
for i, col in enumerate(columns_cr):
print_cell(col.title, col.size, dec=0, left=(i==0))
print()
## printing row with values
for i, col in enumerate(columns_cr):
print_cell(data.get(col.key, col.title), col.size, dec=col.dec, left=(i==0))
print()
def evaluate_binary(net, loader, loss, dev, progress=True):
net.eval()
logloss_sum = 0.0
logloss_count = 0
y_ind_list = []
y_true_list = []
y_hat_list = []
num_tasks = loader.dataset.y.shape[1]
with torch.no_grad():
for b in tqdm(loader, leave=False, disable=(progress == False)):
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
y_ind = b["y_ind"].to(dev)
y_data = b["y_data"].to(dev)
y_hat_all = net(X)
y_hat = y_hat_all[y_ind[0], y_ind[1]]
output = loss(y_hat, y_data).sum()
logloss_sum += output
logloss_count += y_data.shape[0]
## storing data for AUCs
y_ind_list.append(b["y_ind"])
y_true_list.append(b["y_data"])
y_hat_list.append(y_hat.cpu())
if len(y_ind_list) == 0:
return {
"metrics": compute_metrics([], y_true=[], y_score=[], num_tasks=num_tasks),
"logloss": np.nan,
}
y_ind = torch.cat(y_ind_list, dim=1).numpy()
y_true = torch.cat(y_true_list, dim=0).numpy()
y_hat = torch.cat(y_hat_list, dim=0).numpy()
metrics = compute_metrics(y_ind[1], y_true=y_true, y_score=y_hat, num_tasks=num_tasks)
return {
'metrics': metrics,
'logloss': logloss_sum.cpu().numpy() / logloss_count
}
def train_binary(net, optimizer, loader, loss, dev, task_weights, normalize_loss=None, num_int_batches=1, progress=True):
"""
Args:
net pytorch network
optimizer optimizer to use
loader data loader with training data
dev device
task_weights weights of the tasks
normalize_loss normalization value, if None then use batch size
num_int_batches number of internal batches to use
progress whether to show a progress bar
"""
net.train()
logloss_sum = 0.0
logloss_count = 0
int_count = 0
for b in tqdm(loader, leave=False, disable=(progress == False)):
if int_count == 0:
optimizer.zero_grad()
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], loader.dataset.input_size]).to(dev)
y_ind = b["y_ind"].to(dev)
y_w = task_weights[y_ind[1]]
y_data = b["y_data"].to(dev)
yhat_all = net(X)
yhat = yhat_all[y_ind[0], y_ind[1]]
norm = normalize_loss
if norm is None:
norm = b["batch_size"] * num_int_batches
output = (loss(yhat, y_data) * y_w).sum()
output_n = output / norm
output_n.backward()
int_count += 1
if int_count == num_int_batches:
optimizer.step()
int_count = 0
logloss_sum += output.detach() / y_data.shape[0]
logloss_count += 1
if int_count > 0:
## process tail batch (should not happen)
optimizer.step()
return logloss_sum / logloss_count
def batch_forward(net, b, input_size, loss_class, loss_regr, weights_class, weights_regr, censored_weight=[], dev="cpu", normalize_inv=None, y_cat_columns=None):
"""returns full outputs from the network for the batch b"""
X = torch.sparse_coo_tensor(
b["x_ind"],
b["x_data"],
size = [b["batch_size"], input_size]).to(dev, non_blocking=True)
if net.cat_id_size is None:
yc_hat_all, yr_hat_all = net(X)
else:
yc_hat_all, yr_hat_all, ycat_hat_all = net(X)
if normalize_inv is not None:
#inverse normalization
yr_hat_all = inverse_normalization(yr_hat_all, normalize_inv["mean"], normalize_inv["var"], dev).to(dev)
out = {}
out["yc_hat_all"] = yc_hat_all
out["yr_hat_all"] = yr_hat_all
out["yc_loss"] = 0
out["yr_loss"] = 0
out["yc_weights"] = 0
out["yr_weights"] = 0
out["yc_cat_loss"] = 0
if net.class_output_size > 0:
yc_ind = b["yc_ind"].to(dev, non_blocking=True)
yc_w = weights_class[yc_ind[1]]
yc_data = b["yc_data"].to(dev, non_blocking=True)
yc_hat = yc_hat_all[yc_ind[0], yc_ind[1]]
out["yc_ind"] = yc_ind
out["yc_data"] = yc_data
out["yc_hat"] = yc_hat
out["yc_loss"] = (loss_class(yc_hat, yc_data) * yc_w).sum()
out["yc_weights"] = yc_w.sum()
if net.cat_id_size is not None and net.cat_id_size > 0:
yc_cat_ind = b["yc_cat_ind"].to(dev, non_blocking=True)
yc_cat_data = b["yc_cat_data"].to(dev, non_blocking=True)
yc_cat_hat = ycat_hat_all[yc_cat_ind[0], yc_cat_ind[1]]
if y_cat_columns is not None:
yc_hat_all[:,y_cat_columns] = ycat_hat_all
yc_hat = yc_hat_all[yc_ind[0], yc_ind[1]]
out["yc_hat"] = yc_hat
out["yc_cat_loss"] = loss_class(yc_cat_hat, yc_cat_data).sum()
if net.regr_output_size > 0:
yr_ind = b["yr_ind"].to(dev, non_blocking=True)
yr_w = weights_regr[yr_ind[1]]
yr_data = b["yr_data"].to(dev, non_blocking=True)
yr_hat = yr_hat_all[yr_ind[0], yr_ind[1]]
out["ycen_data"] = b["ycen_data"]
if out["ycen_data"] is not None:
out["ycen_data"] = out["ycen_data"].to(dev, non_blocking=True)
if len(censored_weight) > 0:
## updating weights of censored data
yrcen_w = yr_w * censored_weight[yr_ind[1]]
yr_w = torch.where(out["ycen_data"] == 0, yr_w, yrcen_w)
out["yr_ind"] = yr_ind
out["yr_data"] = yr_data
out["yr_hat"] = yr_hat
out["yr_loss"] = (loss_regr(input=yr_hat, target=yr_data, censor=out["ycen_data"]) * yr_w).sum()
out["yr_weights"] = yr_w.sum()
return out
def train_class_regr(net, optimizer, loader, loss_class, loss_regr, dev,
weights_class, weights_regr, censored_weight,
normalize_loss=None, num_int_batches=1, progress=True, reporter=None, writer=None, epoch=0, args=None, scaler=None, nvml_handle=None):
net.train()
int_count = 0
batch_count = 0
#scaler = torch.cuda.amp.GradScaler()
for b in tqdm(loader, leave=False, disable=(progress == False)):
if int_count == 0:
optimizer.zero_grad()
norm = normalize_loss
if norm is None:
norm = b["batch_size"] * num_int_batches
if args.mixed_precision == 1:
mixed_precision = True
else:
mixed_precision = False
with torch.cuda.amp.autocast(enabled=mixed_precision):
fwd = batch_forward(net, b=b, input_size=loader.dataset.input_size, loss_class=loss_class, loss_regr=loss_regr, weights_class=weights_class, weights_regr=weights_regr, censored_weight=censored_weight, dev=dev)
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"]))
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"]))
if batch_count == 1:
with open(f"{args.output_dir}/memprofile.txt", "a+") as profile_file:
with redirect_stdout(profile_file):
profile_file.write(f"\nForward pass model detailed report:\n\n")
reporter.report()
loss = fwd["yc_loss"] + fwd["yr_loss"] + fwd["yc_cat_loss"] + net.GetRegularizer()
loss_norm = loss / norm
#loss_norm.backward()
if mixed_precision:
scaler.scale(loss_norm).backward()
else:
loss_norm.backward()
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+1)
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+1)
int_count += 1
if int_count == num_int_batches:
if mixed_precision and not isinstance(optimizer,Nothing):
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
if writer is not None and reporter is not None:
info = nvmlDeviceGetMemoryInfo(nvml_handle)
#writer.add_scalar("GPUmem", torch.cuda.memory_allocated() / 1024 ** 2, 3*(int_count-1+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+2)
writer.add_scalar("GPUmem", float("{}".format(info.used >> 20)), 3*(int_count-1+num_int_batches*batch_count+epoch*num_int_batches*b["batch_size"])+2)
int_count = 0
batch_count+=1
if int_count > 0:
## process tail batch (should not happen)
if mixed_precision and not isinstance(optimizer,Nothing):
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
def aggregate_results(df, weights):
"""Compute aggregates based on the weights"""
wsum = weights.sum()
if wsum == 0:
        return pd.Series(np.nan, index=df.columns)
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
import tables
from six import with_metaclass
from toolz import keymap, valmap
from trading_calendars import get_calendar
from zipline.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from zipline.gens.sim_engine import NANOS_IN_MINUTE
from zipline.data.bar_reader import BarReader, NoDataForSid, NoDataOnDate
from zipline.data.bcolz_daily_bars import check_uint32_safe
from zipline.utils.cli import maybe_show_progress
from zipline.utils.compat import mappingproxy
from zipline.utils.memoize import lazyval
logger = logbook.Logger('MinuteBars')
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 1000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
def _sid_subdir_path(sid):
"""
Format subdir path to limit the number directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
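# Quick illustration (not part of zipline) of the fan-out described in the docstring:
# sids are zero-padded to six digits and split across two directory levels.
def _example_sid_subdir_paths():
    assert _sid_subdir_path(1) == os.path.join('00', '00', '000001.bcolz')
    assert _sid_subdir_path(123456) == os.path.join('12', '34', '123456.bcolz')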
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = np.nan_to_num(cols['open']) * scale_factor
scaled_highs = np.nan_to_num(cols['high']) * scale_factor
scaled_lows = np.nan_to_num(cols['low']) * scale_factor
scaled_closes = np.nan_to_num(cols['close']) * scale_factor
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
exclude_mask &= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
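# Illustrative sketch (not part of zipline): scaling a couple of float OHLCV bars into the
# uint32 representation using the default OHLC_RATIO of 1000.
def _example_convert_cols():
    cols = {
        'open': np.array([10.50, 10.60]),
        'high': np.array([10.70, 10.80]),
        'low': np.array([10.40, 10.50]),
        'close': np.array([10.65, 10.75]),
        'volume': np.array([1000.0, 1200.0]),
    }
    # Returns uint32 arrays, e.g. open -> [10500, 10600].
    return convert_cols(cols, scale_factor=OHLC_RATIO, sid=1, invalid_data_behavior='warn')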
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : trading_calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data['version']
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data['ohlc_ratio']
if version >= 1:
minutes_per_day = raw_data['minutes_per_day']
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data['calendar_name'])
start_session = pd.Timestamp(
raw_data['start_session'], tz='UTC')
end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar('NYSE')
start_session = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
end_session = calendar.minute_to_session_label(
pd.Timestamp(
raw_data['market_closes'][-1], unit='m', tz='UTC')
)
if version >= 3:
ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the floats from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : trading_calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint32. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (1000).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
multiply the pricing data to convert the floats from floats to
an integer to fit within the np.uint32.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
the quoted price, so that the data can represented and stored as an
np.uint32, supporting market prices quoted up to the thousands place.
volume is a np.uint32 with no mutation of the tens place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
day will have zeros for all pricing data up and until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
def __init__(self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = (
calendar.schedule.index.slice_indexer(start_session, end_session))
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint32)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
        including the specified date) will be padded with `minutes_per_day`
        worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots need to be padded.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date`
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
        if last_date is pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
        last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# %% coding=utf-8
import sys
import dlib
import numpy as np
import pandas as pd
from sklearn.externals import joblib
"""
Actual prediction part
"""
# TODO
# 1. lime has too many issues; replace lime with interpret
predictor_path = "model/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
model = joblib.load('model/beauty.pkl')
df_input = pd.read_csv('/data/face/df_input.csv')
# Concatenate uber1, uber2, and uber3: row_concat
row_concat = pd.concat([uber1, uber2, uber3])
# Print the shape of row_concat
print(row_concat.shape)
# Print the head of row_concat
print(row_concat.head())
# Concatenate ebola_melt and status_country column-wise: ebola_tidy
ebola_tidy = pd.concat([ebola_melt, status_country], axis=1)
# Print the shape of ebola_tidy
print(ebola_tidy.shape)
# Print the head of ebola_tidy
print(ebola_tidy.head())
# Import necessary modules
import glob
import pandas as pd
# Write the pattern: pattern
pattern = '*.csv'
# Save all file matches: csv_files
csv_files = glob.glob(pattern)
# Print the file names
print(csv_files)
# Load the second file into a DataFrame: csv2
csv2 = pd.read_csv(csv_files[1])
# Print the head of csv2
print(csv2.head())
# Create an empty list: frames
frames = []
# Iterate over csv_files
for csv in csv_files:
# Read csv into a DataFrame: df
df = pd.read_csv(csv)
# Append df to frames
frames.append(df)
# Concatenate frames into a single DataFrame: uber
uber = pd.concat(frames)
# Print the shape of uber
print(uber.shape)
# Print the head of uber
print(uber.head())
# Merge the DataFrames: o2o
o2o = pd.merge(left=site, right=visited, left_on="name", right_on="site")
# Print o2o
print(o2o)
# Merge the DataFrames: m2o
m2o = pd.merge(left=site, right=visited, left_on="name", right_on="site")
# Print m2o
print(m2o)
# Merge site and visited: m2m
m2m = pd.merge(left=site, right=visited, left_on="name", right_on="site")
# Merge m2m and survey: m2m
m2m = pd.merge(left=m2m, right=survey, left_on="ident", right_on="taken")
import pandas as pd
import pytest
from bach import DataFrame
from bach.series.series_multi_level import SeriesNumericInterval
@pytest.fixture()
def interval_data_pdf() -> pd.DataFrame:
pdf = pd.DataFrame(
{
'lower': [0., 0., 3., 5., 1., 2., 3., 4., 5.],
'upper': [1., 1., 4., 6., 2., 3., 4., 5., 6.],
'a': [10, 15, 20, 25, 30, 35, 40, 45, 50],
},
)
pdf['bounds'] = '(]'
return pdf
def test_series_numeric_interval_to_pandas(engine, interval_data_pdf: pd.DataFrame) -> None:
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
df['range'] = SeriesNumericInterval.from_value(
base=df,
name='num_interval',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
}
)
expected = pd.DataFrame(
{
'_index_0': [0, 1, 2, 3, 4, 5, 6, 7, 8],
'range': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
pd.Interval(left=2., right=3., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=4., right=5., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
]
}
).set_index('_index_0')['range']
result = df['range'].sort_index().to_pandas()
pd.testing.assert_series_equal(expected, result)
def test_series_numeric_interval_sort_values(engine, interval_data_pdf: pd.DataFrame) -> None:
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
df['range'] = SeriesNumericInterval.from_value(
base=df,
name='num_interval',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
},
)
expected = pd.DataFrame(
{
'range': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
pd.Interval(left=2., right=3., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=4., right=5., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
],
}
)['range']
result = df.reset_index()['range'].sort_values().to_pandas()
pd.testing.assert_series_equal(expected, result, check_index=False, check_index_type=False)
def test_series_numeric_interval_append(engine, interval_data_pdf: pd.DataFrame) -> None:
    interval_data_pdf[['lower', 'upper']] = interval_data_pdf[['lower', 'upper']].astype(int)
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
df['range_1'] = SeriesNumericInterval.from_value(
base=df,
name='range_1',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
},
)
df['range_2'] = SeriesNumericInterval.from_value(
base=df,
name='range_2',
value={
'lower': df['lower'] + 1,
'upper': df['upper'] + 2,
'bounds': df['bounds'],
},
)
expected = pd.DataFrame(
{
'_index_0': [0, 4, 0, 5, 4, 2, 5, 7, 2, 3, 7, 3],
'range_1': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
pd.Interval(left=1., right=3., closed='right'),
pd.Interval(left=2., right=3., closed='right'),
pd.Interval(left=2., right=4., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=3., right=5., closed='right'),
pd.Interval(left=4., right=5., closed='right'),
pd.Interval(left=4., right=6., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
pd.Interval(left=5., right=7., closed='right'),
pd.Interval(left=6., right=8., closed='right'),
],
}
).set_index('_index_0')['range_1']
result = df['range_1'].append(df['range_2'])
result = result.drop_duplicates().sort_values()
pd.testing.assert_series_equal(expected, result.to_pandas())
def test_series_numeric_interval_dropna(engine, interval_data_pdf: pd.DataFrame) -> None:
interval_data_pdf.loc[2, ['upper', 'lower', 'bounds']] = None
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
range = SeriesNumericInterval.from_value(
base=df,
name='range',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
},
)
expected = pd.DataFrame(
{
'_index_0': [0, 1, 3, 4, 5, 6, 7, 8],
'range': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
pd.Interval(left=2., right=3., closed='right'),
pd.Interval(left=3., right=4., closed='right'),
pd.Interval(left=4., right=5., closed='right'),
pd.Interval(left=5., right=6., closed='right'),
]
}
).set_index('_index_0')['range']
result = range.dropna().sort_index().to_pandas()
pd.testing.assert_series_equal(expected, result)
def test_series_numeric_value_counts(engine, interval_data_pdf: pd.DataFrame) -> None:
df = DataFrame.from_pandas(engine=engine, df=interval_data_pdf, convert_objects=True)
range = SeriesNumericInterval.from_value(
base=df,
name='range',
value={
'lower': df['lower'],
'upper': df['upper'],
'bounds': df['bounds'],
},
)
expected = pd.DataFrame(
{
'value_counts': [2, 1, 1, 2, 1, 2],
'range': [
pd.Interval(left=0., right=1., closed='right'),
pd.Interval(left=1., right=2., closed='right'),
                pd.Interval(left=2., right=3., closed='right'),
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/18 18:18
Desc: Eastmoney fund site (天天基金网) - fund data - dividends and splits
http://fund.eastmoney.com/data/fundfenhong.html
"""
import pandas as pd
import requests
from tqdm import tqdm
def fund_fh_em() -> pd.DataFrame:
"""
    Eastmoney fund site (天天基金网) - fund data - dividends and splits - fund dividends
    http://fund.eastmoney.com/data/fundfenhong.html#DJR,desc,1,,,
    :return: fund dividend records
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "8",
"page": "1",
"rank": "DJR",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var jjfh_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"权益登记日",
"除息日期",
"分红",
"分红发放日",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "权益登记日", "除息日期", "分红", "分红发放日"]]
big_df['权益登记日'] = pd.to_datetime(big_df['权益登记日']).dt.date
big_df['除息日期'] = pd.to_datetime(big_df['除息日期']).dt.date
big_df['分红发放日'] = pd.to_datetime(big_df['分红发放日']).dt.date
big_df['分红'] = pd.to_numeric(big_df['分红'])
return big_df
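# Usage sketch (illustrative, not part of the original module; the call below hits the
# live Eastmoney endpoint and paginates through all result pages):
def _demo_fund_fh_em():
    fund_fh_em_df = fund_fh_em()
    print(fund_fh_em_df.head())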
def fund_cf_em() -> pd.DataFrame:
"""
    Eastmoney fund site (天天基金网) - fund data - dividends and splits - fund splits
    http://fund.eastmoney.com/data/fundchaifen.html#FSRQ,desc,1,,,
    :return: fund split records
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "9",
"page": "1",
"rank": "FSRQ",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var jjcf_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"拆分折算日",
"拆分类型",
"拆分折算",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "拆分折算日", "拆分类型", "拆分折算"]]
big_df['拆分折算日'] = pd.to_datetime(big_df['拆分折算日']).dt.date
big_df['拆分折算'] = pd.to_numeric(big_df['拆分折算'], errors="coerce")
return big_df
def fund_fh_rank_em() -> pd.DataFrame:
"""
    Eastmoney fund site (天天基金网) - fund data - dividends and splits - fund dividend ranking
    http://fund.eastmoney.com/data/fundleijifenhong.html#FHFCZ,desc,1,,
    :return: fund dividend ranking
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "10",
"page": "1",
"rank": "FHFCZ",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var fhph_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"累计分红",
"累计次数",
"成立日期",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "累计分红", "累计次数", "成立日期"]]
big_df['成立日期'] = pd.to_datetime(big_df['成立日期']).dt.date
big_df['累计分红'] = pd.to_numeric(big_df['累计分红'], errors="coerce")
    big_df['累计次数'] = pd.to_numeric(big_df['累计次数'], errors="coerce")
    return big_df
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
        res = pd.to_numeric(idx.values)
        tm.assert_numpy_array_equal(res, exp)
import pytest
import pandas._testing as tm
from pandas.io.formats.css import CSSResolver, CSSWarning
def assert_resolves(css, props, inherited=None):
resolve = CSSResolver()
actual = resolve(css, inherited=inherited)
assert props == actual
def assert_same_resolution(css1, css2, inherited=None):
resolve = CSSResolver()
resolved1 = resolve(css1, inherited=inherited)
resolved2 = resolve(css2, inherited=inherited)
assert resolved1 == resolved2
@pytest.mark.parametrize(
"name,norm,abnorm",
[
(
"whitespace",
"hello: world; foo: bar",
" \t hello \t :\n world \n ; \n foo: \tbar\n\n",
),
("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"),
("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"),
("empty-list", "", ";"),
],
)
def test_css_parse_normalisation(name, norm, abnorm):
assert_same_resolution(norm, abnorm)
@pytest.mark.parametrize(
"invalid_css,remainder",
[
# No colon
("hello-world", ""),
("border-style: solid; hello-world", "border-style: solid"),
(
"border-style: solid; hello-world; font-weight: bold",
"border-style: solid; font-weight: bold",
),
# Unclosed string fail
# Invalid size
("font-size: blah", "font-size: 1em"),
("font-size: 1a2b", "font-size: 1em"),
("font-size: 1e5pt", "font-size: 1em"),
("font-size: 1+6pt", "font-size: 1em"),
("font-size: 1unknownunit", "font-size: 1em"),
("font-size: 10", "font-size: 1em"),
("font-size: 10 pt", "font-size: 1em"),
],
)
def test_css_parse_invalid(invalid_css, remainder):
with tm.assert_produces_warning(CSSWarning):
assert_same_resolution(invalid_css, remainder)
# TODO: we should be checking that in other cases no warnings are raised
@pytest.mark.parametrize(
"shorthand,expansions",
[
("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]),
("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]),
(
"border-width",
[
"border-top-width",
"border-right-width",
"border-bottom-width",
"border-left-width",
],
),
(
"border-font",
[
"border-top-font",
"border-right-font",
"border-bottom-font",
"border-left-font",
],
),
(
"border-style",
[
"border-top-style",
"border-right-style",
"border-bottom-style",
"border-left-style",
],
),
],
)
def test_css_side_shorthands(shorthand, expansions):
top, right, bottom, left = expansions
assert_resolves(
f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"},
)
assert_resolves(
f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"},
)
assert_resolves(
f"{shorthand}: 1pt 4pt 2pt",
{top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"},
)
assert_resolves(
f"{shorthand}: 1pt 4pt 2pt 0pt",
{top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"},
)
with tm.assert_produces_warning(CSSWarning):
assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {})
@pytest.mark.parametrize(
"style,inherited,equiv",
[
("margin: 1px; margin: 2px", "", "margin: 2px"),
("margin: 1px", "margin: 2px", "margin: 1px"),
("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"),
(
"margin: 1px; margin-top: 2px",
"",
"margin-left: 1px; margin-right: 1px; "
+ "margin-bottom: 1px; margin-top: 2px",
),
("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"),
("margin: 1px", "margin-top: 2px", "margin: 1px"),
(
"margin: 1px; margin-top: inherit",
"margin: 2px",
"margin: 1px; margin-top: 2px",
),
],
)
def test_css_precedence(style, inherited, equiv):
    resolve = CSSResolver()
    inherited_props = resolve(inherited)
    style_props = resolve(style, inherited=inherited_props)
    equiv_props = resolve(equiv)
    assert style_props == equiv_props
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
import sklearn.metrics as sm
import pandas as pd
import numpy as np
iris = datasets.load_iris()
X = pd.DataFrame(iris.data)
""""""
"""
Copyright (c) 2021 <NAME> as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import numpy as np
import pandas as pd
import torch
from lib.loss_metrics import RMSE, ND, QuantileLoss, MAPE, sMAPE, NRMSE
import importlib
import time
#%% Helper functions
# Quantile function for StudenT(2) distribution
def StudentT2icdf(loc, scale, quantile):
alpha = 4 * quantile * (1 - quantile)
Q = 2 * (quantile - 0.5) * (2 / alpha).sqrt()
return Q * scale + loc
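# Sanity-check sketch (illustrative, not part of the original module): the formula above is
# the closed-form quantile function of a Student-t distribution with 2 degrees of freedom,
# so the 0.5 quantile recovers `loc` and the other quantiles are symmetric around it.
def _example_studentt2_icdf():
    loc, scale = torch.tensor(0.0), torch.tensor(1.0)
    q10 = StudentT2icdf(loc, scale, torch.tensor(0.1))  # ~ -1.886
    q50 = StudentT2icdf(loc, scale, torch.tensor(0.5))  # 0.0
    q90 = StudentT2icdf(loc, scale, torch.tensor(0.9))  # ~ +1.886
    return q10, q50, q90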
# Fix seed
def fix_seed(seed):
# Set seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Calculate and show metrics
def calc_metrics(yhat, y, quantiles):
df = pd.DataFrame(columns={'RMSE','NRMSE','ND','MAPE','sMAPE','QuantileLoss','Quantile'})
df.loc[:, 'Quantile'] = quantiles
for q, quantile in enumerate(quantiles):
df.loc[q, 'RMSE'] = RMSE(y, yhat[q])
df.loc[q, 'NRMSE'] = NRMSE(y, yhat[q])
df.loc[q, 'ND'] = ND(y, yhat[q])
df.loc[q, 'MAPE'] = MAPE(y, yhat[q])
df.loc[q, 'sMAPE'] = sMAPE(y, yhat[q])
df.loc[q, 'QuantileLoss'] = QuantileLoss(y, yhat[q], quantile)
q = 4
print(f" RMSE/NRMSE/ND/MAPE/sMAPE loss: {df['RMSE'][q]:.2f}/{df['NRMSE'][q]:.2f}/{df['ND'][q]:.3f}/{df['MAPE'][q]:.3f}/{df['sMAPE'][q]:.3f}")
print(f" p10/p50/p90/mp50 loss: {df['QuantileLoss'][0]:.3f}/{df['QuantileLoss'][4]:.3f}/{df['QuantileLoss'][8]:.3f}/{df['QuantileLoss'].mean():.3f}")
return df
# Instantiate model based on string algorithm input
def instantiate_model(algorithm):
model_class = importlib.import_module('algorithms.'+algorithm)
model = getattr(model_class, algorithm)
return model
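# --- Illustrative sketch (not part of the original file) ---
# instantiate_model() resolves "algorithms.<name>.<name>" dynamically, so adding a new
# forecaster only requires dropping a module with a matching class name into algorithms/.
# The algorithm name and constructor arguments below are assumptions for the demo.
def _demo_instantiate_model(algorithm_name='deepar', **model_kwargs):
    model_cls = instantiate_model(algorithm_name)  # e.g. algorithms/deepar.py -> class deepar
    return model_cls(**model_kwargs)               # construct it like any other class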
# Count model parameters
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Read experiment csv
def read_table(filename):
for x in range(0, 10):
try:
table = | pd.read_csv(filename, sep=';') | pandas.read_csv |
import json
import multiprocessing
import os
from itertools import repeat, product
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from exprimo import set_log_dir, log, PLOT_STYLE
from exprimo.optimize import optimize_with_config
sns.set(style=PLOT_STYLE)
LOG_DIR = os.path.expanduser('~/logs/e3_optimizer-comparison')
set_log_dir(LOG_DIR)
run_config = 'pipelined' # (1, 0, 0, 1)
NETWORK = ('resnet50', 'alexnet', 'inception')[run_config[0] if isinstance(run_config[0], int) else 0]
BATCHES = (1, 10)[run_config[1] if isinstance(run_config[1], int) else 0]
PIPELINE_BATCHES = (1, 2, 4)[run_config[2] if isinstance(run_config[2], int) else 0]
MEMORY_LIMITED = bool(run_config[3] if len(run_config) > 3 and isinstance(run_config[3], int) else 0)
REPEATS = 50
OPTIMIZERS = ('hc', 'sa', 'ga', 'me')
OPTIMIZER_NAMES = {
'hc': 'Hill Climbing',
'sa': 'Simulated Annealing',
'ga': 'Genetic Algorithm',
'me': 'MAP-elites',
}
NETWORK_NAMES = {
'resnet50': 'ResNet-50',
'alexnet': 'AlexNet',
'inception': 'Inception V3'
}
cmap = sns.cubehelix_palette(5, start=.5, rot=-.75, reverse=True)
def test_optimizer(c, r, log_dir):
c['log_dir'] = log_dir + f'/{r:03}'
_, t = optimize_with_config(config=c, verbose=False, set_log_dir=True)
return t
def run_optimizer_test(n_threads=-1):
if n_threads == -1:
n_threads = multiprocessing.cpu_count()
for optimizer in tqdm(OPTIMIZERS):
# log(f'Testing optimizer {optimizer}')
run_name = f'e3_{optimizer}-{NETWORK}{"-pipeline" if PIPELINE_BATCHES > 1 else ""}' \
f'{"-limited" if MEMORY_LIMITED else ""}'
config_path = f'configs/experiments/e3/{run_name}.json'
score_path = os.path.join(LOG_DIR, f'{run_name}_scores.csv')
with open(score_path, 'w') as f:
f.write('run, time\n')
with open(config_path) as f:
config = json.load(f)
config['optimizer_args']['verbose'] = False
config['optimizer_args']['batches'] = BATCHES
config['optimizer_args']['pipeline_batches'] = PIPELINE_BATCHES
log_dir = config['log_dir']
threaded_optimizer = config['optimizer'] in ('ga', 'genetic_algorithm', 'map-elites', 'map_elites')
if n_threads == 1 or threaded_optimizer:
for r in tqdm(range(REPEATS)):
time = test_optimizer(config, r, log_dir)
with open(score_path, 'a') as f:
f.write(f'{r},{time}\n')
else:
worker_pool = multiprocessing.Pool(n_threads)
times = worker_pool.starmap(test_optimizer, zip(repeat(config), (r for r in range(REPEATS)),
repeat(log_dir)))
worker_pool.close()
with open(score_path, 'a') as f:
for r, t in enumerate(times):
f.write(f'{r},{t}\n')
set_log_dir(LOG_DIR)
def plot_results():
all_results = pd.DataFrame()
# CREATE PLOT OF RESULTS
for optimizer in OPTIMIZERS:
run_name = f'e3_{optimizer}-{NETWORK}{"-pipeline" if PIPELINE_BATCHES > 1 else ""}' \
f'{"-limited" if MEMORY_LIMITED else ""}'
score_path = os.path.join(LOG_DIR, f'{run_name}_scores.csv')
scores = pd.read_csv(score_path, index_col=0, squeeze=True)
scores /= PIPELINE_BATCHES
all_results[OPTIMIZER_NAMES[optimizer]] = scores
plt.figure(figsize=(8, 8))
chart = sns.barplot(data=all_results, palette=cmap)
chart.set_xticklabels(
chart.get_xticklabels(),
rotation=45,
horizontalalignment='right',
)
plt.ylabel('Batch execution time (ms)')
plt.xlabel('Optimization algorithm')
plt.tight_layout()
plt.savefig(os.path.join(LOG_DIR, 'score_comparison.pdf'))
plt.show()
plt.close()
def plot_result_all_networks(test_type='normal'):
all_results = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : utils.py
# Modified : 17.02.2022
# By : <NAME> <<EMAIL>>
from collections import OrderedDict
import numpy as np
import os
from typing import List
import random
import cv2
from PIL import Image
import torch
import torchvision
from pathlib import Path
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from efficientnet_pytorch import EfficientNet
from torchvision import transforms
from torch.utils.data import Dataset
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
import wandb
training_transforms = transforms.Compose([#Microscope(),
#AdvancedHairAugmentation(),
transforms.RandomRotation(30),
#transforms.RandomResizedCrop(256, scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
#transforms.ColorJitter(brightness=32. / 255.,saturation=0.5,hue=0.01),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
testing_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# Creating seeds to make results reproducible
def seed_everything(seed_value):
np.random.seed(seed_value)
random.seed(seed_value)
torch.manual_seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed = 2022
seed_everything(seed)
def get_parameters(net, EXCLUDE_LIST) -> List[np.ndarray]:
parameters = []
for i, (name, tensor) in enumerate(net.state_dict().items()):
# print(f" [layer {i}] {name}, {type(tensor)}, {tensor.shape}, {tensor.dtype}")
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Convert torch.Tensor to NumPy.ndarray
parameters.append(tensor.cpu().numpy())
return parameters
def set_parameters(net, parameters, EXCLUDE_LIST):
keys = []
for name in net.state_dict().keys():
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Add to list of included keys
keys.append(name)
params_dict = zip(keys, parameters)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
net.load_state_dict(state_dict, strict=False)
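# --- Illustrative sketch (not part of the original file) ---
# Round-trip of the two helpers above: pull the shareable weights out of one model and load
# them into another, skipping every tensor whose name contains an entry of the exclude list
# (e.g. BatchNorm running statistics in a federated setting). The exclude list is an assumption.
def _demo_parameter_roundtrip(src_net, dst_net):
    exclude = ["running_mean", "running_var", "num_batches_tracked"]
    weights = get_parameters(src_net, exclude)  # list of numpy arrays, filtered by name
    set_parameters(dst_net, weights, exclude)   # load them back with the same filtering
    return dst_net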
class Net(nn.Module):
def __init__(self, arch, return_feats=False):
super(Net, self).__init__()
self.arch = arch
self.return_feats = return_feats
if 'fgdf' in str(arch.__class__):
self.arch.fc = nn.Linear(in_features=1280, out_features=500, bias=True)
if 'EfficientNet' in str(arch.__class__):
self.arch._fc = nn.Linear(in_features=self.arch._fc.in_features, out_features=500, bias=True)
#self.dropout1 = nn.Dropout(0.2)
else:
self.arch.fc = nn.Linear(in_features=arch.fc.in_features, out_features=500, bias=True)
self.output = nn.Linear(500, 1)
def forward(self, images):
"""
        No sigmoid in forward because we are going to use BCEWithLogitsLoss,
        which applies the sigmoid for us when calculating the loss
"""
x = images
features = self.arch(x)
output = self.output(features)
if self.return_feats:
return features
return output
def load_model(model = 'efficientnet-b2', device="cuda"):
if "efficientnet" in model:
arch = EfficientNet.from_pretrained(model)
elif model == "googlenet":
arch = torchvision.models.googlenet(pretrained=True)
else:
arch = torchvision.models.resnet50(pretrained=True)
model = Net(arch=arch).to(device)
return model
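# --- Illustrative sketch (not part of the original file) ---
# As the Net.forward docstring notes, the model returns raw logits, so the matching loss is
# BCEWithLogitsLoss (which applies the sigmoid internally). Batch size, image size and the
# CPU device below are demo assumptions.
def _demo_forward_and_loss(device='cpu'):
    model = load_model('efficientnet-b2', device=device)
    criterion = nn.BCEWithLogitsLoss()
    images = torch.randn(4, 3, 256, 256, device=device)         # a fake batch
    labels = torch.randint(0, 2, (4, 1), device=device).float()
    logits = model(images)                                       # (4, 1) raw scores
    return criterion(logits, labels)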
def create_split(source_dir, n_b, n_m):
# Split synthetic dataset
input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if os.path.isfile(f)]
ind_0, ind_1 = [], []
for i, f in enumerate(input_images):
if f.split('.')[0][-1] == '0':
ind_0.append(i)
else:
ind_1.append(i)
train_id_list, val_id_list = ind_0[:round(len(ind_0)*0.8)], ind_0[round(len(ind_0)*0.8):] #ind_0[round(len(ind_0)*0.6):round(len(ind_0)*0.8)] ,
train_id_1, val_id_1 = ind_1[:round(len(ind_1)*0.8)], ind_1[round(len(ind_1)*0.8):] #ind_1[round(len(ind_1)*0.6):round(len(ind_1)*0.8)] ,
train_id_list = np.append(train_id_list, train_id_1)
val_id_list = np.append(val_id_list, val_id_1)
return train_id_list, val_id_list #test_id_list
def load_isic_by_patient(partition, path='/workspace/melanoma_isic_dataset'):
# Load data
df = pd.read_csv(os.path.join(path,'train_concat.csv'))
train_img_dir = os.path.join(path,'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
df["patient_id"] = df["patient_id"].fillna('nan')
# df.loc[df['patient_id'].isnull()==True]['target'].unique() # 337 rows melanomas
"""
# EXP 6: same bias/ratio same size - different BIASES
bias_df = pd.read_csv("/workspace/flower/bias_pseudoannotations_real_train_ISIC20.csv")
bias_df['image_name'] = [os.path.join(train_img_dir, bias_df.iloc[index]['image_name']) for index in range(len(bias_df))]
#bias_df = pd.merge(bias_df, df, how='inner', on=["image_name"])
target_groups = bias_df.groupby('target', as_index=False) # keep column target
df_ben = target_groups.get_group(0) # 32533 benign
df_mal = target_groups.get_group(1) # 5105 melanoma
# EXP 6
if partition == 0:
#FRAMES
df_b = df_ben.groupby('black_frame').get_group(1) # 687 with frame
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((1,0))[:323] # 2082 with frame
df = pd.concat([df_b, df_m]) # Use 1010 (32%mel) # TOTAL 2848 (75% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
# RULES
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(1125) # 4717 with rules and no frames
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(375) # 516 with rules and no frames
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 5233 (10% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
# NONE
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(1125) # 27129 without frames or rulers
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(375) # 2507 without frames or rulers 14%
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 29636 (8.4% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0))[2000:5000] # 3000
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0))[500:1500] # 1000 (30% M) T=4000
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
"""
# Split by Patient
patient_groups = df.groupby('patient_id') #37311
# Split by Patient and Class
melanoma_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if patient_groups.get_group(x)['target'].unique().all()==1] # 4188 - after adding ID na 4525
benign_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if 0 in patient_groups.get_group(x)['target'].unique()] # 2055 - 33123
np.random.shuffle(melanoma_groups_list)
np.random.shuffle(benign_groups_list)
# EXP 5: same bias/ratio different size - simulate regions
if partition == 0:
df_b = pd.concat(benign_groups_list[:270]) # 4253
df_m = pd.concat(melanoma_groups_list[:350]) # 1029 (19.5% melanomas) T=5282
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
df_b = pd.concat(benign_groups_list[270:440]) # 2881
df_m = pd.concat(melanoma_groups_list[350:539]) # 845 (22.6% melanomas) T=3726
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
df_b = pd.concat(benign_groups_list[440:490]) # 805
df_m = pd.concat(melanoma_groups_list[539:615]) # 194 (19.4% melanomas) T=999
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 3:
df_b = | pd.concat(benign_groups_list[490:511]) | pandas.concat |
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, freedom Partners"
__email__ = "<EMAIL>"
'''
wg2 Merger
Synopsis :
merger.merge_reviews() : Merge all reviews ina single file / dataframe
merger.replace_places() : unify matching places with different names
merger.close_places() : Marks closed corresponding places
merger.create_db() : Creates db files for website
merger.process_tags()
'''
import logging
import json
import pandas as pd
import wg2.util.text
import wg2.util.progress_monitor
import wg2.db.tags
class PlaceDataframe():
def __init__(self):
self._places = pd.DataFrame(columns = ['title', 'title_n', 'address', 'addr_details',
'lat', 'lng', 'official_url',
'closed','tags','ratings'])
def insert(self, item):
if (self._places.shape[0] == 0):
id = 0
else:
id = self._places.index.max() + 1
item_df = pd.DataFrame(data=item,index=[id])
self._places = pd.concat([self._places,item_df])
return id
def find(self,title_n,lat,lng):
return self._places[abs(self._places['lat']-lat)<0.1][abs(self._places['lng']-lng)<0.1][self._places['title_n']==title_n]
def find_or_insert(self,item) :
id = 0
df = self.find(item['title_n'],item['lat'],item['lng'])
if df.shape[0] == 0 :
id = self.insert(item)
else :
id = df.index[0]
return id
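# --- Illustrative sketch (not part of the original file) ---
# PlaceDataframe de-duplicates venues by normalised title plus a ~0.1 degree lat/lng window,
# so the same place scraped from two sources gets a single id. Field values are demo assumptions.
def _demo_place_dataframe():
    places = PlaceDataframe()
    item = {'title': 'Chez Demo', 'title_n': 'chez demo', 'address': '1 Rue Exemple',
            'addr_details': '', 'lat': 48.85, 'lng': 2.35, 'official_url': '',
            'closed': False, 'tags': '', 'ratings': ''}
    first_id = places.find_or_insert(item)
    second_id = places.find_or_insert(dict(item))  # same place again -> same id is returned
    return first_id, second_id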
class Merger():
# _sources = ['pdl','tra','mcl','lfd','tmo']
_sources = ['pdl','tra','mcl','lfd'] # Timeout import not ready...
_data_root = 'data/'
_reviews_filename = _data_root+'reviews.csv'
_place_replace_filename = _data_root+'place_replace.csv'
_place_closed_filename = _data_root+'place_closed.csv'
_place_db_filename = _data_root+'place_db.csv'
_review_db_filename = _data_root+'review_db.csv'
_pm = wg2.util.progress_monitor.ProgressMonitor("Merger")
def merge_reviews(self):
reviews = pd.DataFrame()
for source in self._sources:
filename = self._data_root+source+'_dataset.csv'
df = | pd.read_csv(filename) | pandas.read_csv |
import argparse
import pandas as pd
import csv
def join_on_key(file1, file2, on_key, col_sep, file1_disp_col, file2_disp_col, output_file):
print('file1: {0}'.format(file1))
print('file2: {0}'.format(file2))
print('on_key: {0}'.format(on_key))
print('col_sep: {0}'.format(col_sep))
print('file1_disp_col: {0}'.format(file1_disp_col))
print('file2_disp_col: {0}'.format(file2_disp_col))
print('output: {0}'.format(output_file))
df_file1 = pd.read_csv(file1, header=None)
file1_num_col = len(df_file1.columns)
file1_cols = ['col_f1_{0}'.format(x) for x in range(1, file1_num_col + 1)]
df_file1.columns = file1_cols
df_file1.fillna(-1, inplace=True)
df_file2 = | pd.read_csv(file2, header=None) | pandas.read_csv |
import xml.etree.ElementTree as et
import numpy as np
import pandas as pd
from nltk import (word_tokenize, pos_tag)
from nltk.corpus import sentiwordnet as swn
from nltk.metrics import edit_distance
import hunspell
import re
from tqdm import tqdm
import argparse
import sys
import config as cf
# def parse2014AspectTerm(filepath):
# """
# Since no good way of collecting the aspect term words from the raw xml data,
# this function is using loop to facilitate collecting the terms manually.
# """
# aspectTerm_dict = {
# 'food': [],
# 'service': [],
# 'price': [],
# 'ambience': [],
# 'anecdotes/miscellaneous': []
# }
# tree = et.parse(filepath)
# root = tree.getroot()
# sentences = root.findall('sentence')
def parse2014(filepath, args):
"""
parse 2014 raw data in xml format
only tested for restaurant data
"""
data = pd.DataFrame(columns = ['id', 'text', 'aspect', 'polarity'])
tree = et.parse(filepath)
root = tree.getroot()
    sentences = root.findall('sentence')
i = 0
for sentence in tqdm(sentences):
id = sentence.attrib.get('id')
text = sentence.find('text').text
# TODO categorize term words/phrases into aspect terms
# aspectTerms = child.find('aspectTerms')
# if aspectTerms != None:
# for term in aspectTerms.findall('aspectTerm'):
# terms.append(term.attrib.get('term'))
for category in sentence.find('aspectCategories').findall('aspectCategory'):
if category.attrib.get('polarity') != 'conflict':
data.loc[i] = [id, text, category.attrib.get('category'), category.attrib.get('polarity')]
i = i + 1
writeCSV(data, cf.ROOT_PATH + cf.DATA_PATH + '%s_%s_%s_raw.csv' % (args.domain, args.aim, args.year))
# revised 3/28/18 to add call to writeCOR
writeCOR(data, cf.ROOT_PATH + cf.DATA_PATH + '%s_%s_%s_raw.cor' % (args.domain, args.aim, args.year))
return data
def writeCSV(dataframe, filepath):
dataframe.to_csv(filepath, index = False)
def writeCOR(dataframe, filepath):
numex = len(dataframe.index)
with open(filepath, 'w') as f:
for i in range(numex):
#
if dataframe.loc[i][3] == 'positive':
f.write(dataframe.loc[i][1] + '\n')
f.write(dataframe.loc[i][2] + '\n')
f.write('1' + '\n')
elif dataframe.loc[i][3] == 'negative':
f.write(dataframe.loc[i][1] + '\n')
f.write(dataframe.loc[i][2] + '\n')
f.write('-1' + '\n')
elif dataframe.loc[i][3] == 'neutral':
f.write(dataframe.loc[i][1] + '\n')
f.write(dataframe.loc[i][2] + '\n')
f.write('0' + '\n')
#
f.close()
# end of writeCor()
def tokenize(data):
wordData = []
for s in data:
wordData.append([w for w in word_tokenize(s.lower())])
return wordData
def cleanup(wordData):
dictionary = embeddingDict(embeddingPath)
wordData = cleanOp(wordData, re.compile(r'-'), dictionary, correctDashWord)
wordData = cleanOp(wordData, re.compile(r'-'), dictionary, cleanDashWord)
wordData = cleanOp(wordData, re.compile(r':'), dictionary, parseTime)
wordData = cleanOp(wordData, re.compile('\+'), dictionary, parsePlus)
wordData = cleanOp(wordData, re.compile(r'\d+'), dictionary, parseNumber)
# Revised 3/29/18 to move spell check to separate method
# wordData = cleanOp(wordData, re.compile(r''), dictionary, correctSpell)
return wordData
def spellcheck(wordData):
dictionary = embeddingDict(embeddingPath)
wordData = cleanOp(wordData, re.compile(r''), dictionary, correctSpell)
return wordData
def cleanOp(wordData, regex, dictionary, op):
for i, sentence in enumerate(wordData):
if bool(regex.search(sentence)):
newSentence = ''
for word in word_tokenize(sentence.lower()):
if bool(regex.search(word)) and word not in dictionary:
word = op(word)
newSentence = newSentence + ' ' + word
wordData[i] = newSentence[1:] # revised 3/29/18 to avoid space at start of sentence
return wordData
def parseTime(word):
time_re = re.compile(r'^(([01]?\d|2[0-3]):([0-5]\d)|24:00)(pm|am)?$')
if not bool(time_re.match(word)):
return word
else:
dawn_re = re.compile(r'0?[234]:(\d{2})(am)?$')
earlyMorning_re = re.compile(r'0?[56]:(\d{2})(am)?$')
morning_re = re.compile(r'((0?[789])|(10)):(\d{2})(am)?$')
noon_re = re.compile(r'((11):(\d{2})(am)?)|(((0?[01])|(12)):(\d{2})pm)$')
afternoon_re = re.compile(r'((0?[2345]):(\d{2})pm)|((1[4567]):(\d{2}))$')
evening_re = re.compile(r'((0?[678]):(\d{2})pm)|(((1[89])|20):(\d{2}))$')
night_re = re.compile(r'(((0?9)|10):(\d{2})pm)|((2[12]):(\d{2}))$')
midnight_re = re.compile(r'(((0?[01])|12):(\d{2})am)|(0?[01]:(\d{2}))|(11:(\d{2})pm)|(2[34]:(\d{2}))$')
if bool(noon_re.match(word)):
return 'noon'
elif bool(evening_re.match(word)):
return 'evening'
elif bool(morning_re.match(word)):
return 'morning'
elif bool(earlyMorning_re.match(word)):
return 'early morning'
elif bool(night_re.match(word)):
return 'night'
elif bool(midnight_re.match(word)):
return 'midnight'
        elif bool(dawn_re.match(word)):
            return 'dawn'
        elif bool(afternoon_re.match(word)):
            return 'afternoon'
else:
return word
def parsePlus(word):
return re.sub('\+', ' +', word)
def parseNumber(word):
    if not bool(re.search(r'\d+', word)):
        return word
    else:
search = re.search(r'\d+', word)
pos = search.start()
num = search.group()
return word[:pos] + ' %s ' % num + parseNumber(word[pos+len(num):])
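# --- Illustrative sketch (not part of the original file) ---
# parseNumber() recursively pads digits with spaces so that e.g. "open24hours" tokenises as
# "open 24 hours", while words without digits pass through unchanged. Inputs are demo values.
def _demo_parse_number():
    return parseNumber("open24hours"), parseNumber("pizza")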
# def translateSymbol(word):
def checkSpell(word):
global hobj
return hobj.spell(word)
def correctSpell(word):
global hobj
suggestions = hobj.suggest(word)
if len(suggestions) != 0:
distance = [edit_distance(word, s) for s in suggestions]
return suggestions[distance.index(min(distance))]
else:
return word
def createTempVocabulary(wordData, args):
words = sorted(set([word for l in wordData for word in l.split(' ')]))
global embeddingPath
vocabulary = filterWordEmbedding(words, embeddingPath, args)
return vocabulary
def splitDashWord(word):
if '-' not in word:
return [word]
else:
return word.split('-')
def cleanDashWord(word):
return ''.join([s + ' ' for s in word.split('-')])
def correctDashWord(word):
splittedWords = word.split('-')
for i, word in enumerate(splittedWords):
if not checkSpell(word):
splittedWords[i] = correctSpell(word)
return ''.join([s + '-' for s in splittedWords])[:-1]
def joinWord(words):
return ''.join([s + ' ' for s in words])[:-1]
def embeddingDict(embeddingPath):
dictionary = []
with open(embeddingPath) as f:
for line in tqdm(f):
values = line.split()
word = joinWord(values[:-300])
dictionary.append(word)
f.close()
return dictionary
def filterWordEmbedding(words, embeddingPath, args):
vocabulary = []
filteredEmbeddingDict = []
words = [word.lower() for word in words]
with open(embeddingPath) as f:
for line in tqdm(f):
values = line.split()
word = values[0]
# word = word.decode('utf-8') # added to remove Unicode warning
# try-except added to debug Unicode warning
# to see the word that triggers warning, from command line: python -W error::UnicodeWarning preprocess.py
try:
if word in words:
vocabulary.append(word)
filteredEmbeddingDict.append(line)
except:
print("stopping in filterWordEmbedding")
# print("line: ", line)
# print("values: ", values)
print("word: ", word)
# print("words: ", words)
# exit()
f.close()
unknownWords = [word for word in words if word not in vocabulary]
with open(dataPath + '%s_filtered_%s.txt' % (cf.WORD2VEC_FILE[0:-4], args.aim), 'w+') as f:
for line in filteredEmbeddingDict:
f.write(line)
with open('unknown.txt', 'w+') as f:
for i, word in enumerate(unknownWords):
f.write(word + '\n')
def createVocabulary(trainDictPath, testDictPath, gloveDictPath):
dictionary = []
with open(trainDictPath) as f:
for line in f:
dictionary.append(line)
f.close()
with open(testDictPath) as f:
for line in f:
dictionary.append(line)
f.close()
with open(gloveDictPath) as f:
miscFlag = True
anecFlag = True
for line in f:
if not (miscFlag or anecFlag):
break
word = line.split()[0]
if miscFlag and word == 'miscellaneous':
dictionary.append(line)
miscFlag = False
if anecFlag and word == 'anecdotes':
dictionary.append(line)
anecFlag = False
f.close()
dictionary = set(dictionary)
dictionaryNP = np.zeros((len(dictionary) + 1, 300))
with open(dataPath + '%s_filtered.txt' % cf.WORD2VEC_FILE[0:-4], 'w+') as f:
for i, line in enumerate(dictionary):
values = line.split()
try:
dictionaryNP[i] = np.asarray(values[-300:], dtype='float32')
except ValueError:
print(joinWord(values[:-300]))
f.write(line)
f.close()
dictionaryNP[-1] = np.random.normal(0, 0.01, [1,300])
np.save(dataPath + 'glove', dictionaryNP)
def sampleData():
"""
To randomly sample a small fraction from the processed train and test data,
which will be used for testing the models
"""
trainDataPath = dataPath + cf.TRAIN_FILE
testDataPath = dataPath + cf.TEST_FILE
train = pd.read_csv(trainDataPath)
test = | pd.read_csv(testDataPath) | pandas.read_csv |
from collections import defaultdict
from scipy.stats import fisher_exact
import pandas as pd
import os
import sys
import glob
from oats.nlp.search import binary_robinkarp_match, binary_fuzzy_match
from oats.utils.utils import flatten
def annotate_using_rabin_karp(ids_to_texts, ontology, fixcase=1):
"""
Build a dictionary of annotations using the Rabin Karp algorithm. This is useful for finding
instances of ontology terms in larger text strings.
Args:
ids_to_texts (dict of int:str): Mapping from unique integer IDs to natural language text strings.
ontology (oats.annotation.Ontology): Object of the ontology to be used.
fixcase (int, optional): Set to 1 to normalize all strings before matching, set to 0 to ignore this option.
Returns:
dict of int:list of str: Mapping from unique integer IDs to lists of ontology term IDs.
"""
annotations = defaultdict(list)
prime = 193
for identifer,description in ids_to_texts.items():
annotations[identifer].extend([])
for word,term_list in ontology.token_to_terms.items():
if fixcase==1:
word = word.lower()
description = description.lower()
if binary_robinkarp_match(word, description, prime):
annotations[identifer].extend(term_list)
return(annotations)
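# --- Illustrative sketch (not part of the original file) ---
# Typical call pattern for the Rabin-Karp annotator above: map record IDs to free text and get
# back the ontology term IDs whose label tokens occur in that text. The example descriptions
# are assumptions; any oats.annotation.Ontology instance can be passed in.
def _demo_rabin_karp_annotation(ontology):
    ids_to_texts = {1: "Dwarf plants with short roots.",
                    2: "Leaves show chlorosis under drought."}
    return annotate_using_rabin_karp(ids_to_texts, ontology, fixcase=1)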
def annotate_using_fuzzy_matching(ids_to_texts, ontology, threshold=0.90, fixcase=1, local=1):
"""
Build a dictionary of annotations using fuzzy string matching. This is useful for finding
instances of ontology terms in larger text strings.
Args:
ids_to_texts (dict of int:str): Mapping from unique integer IDs to natural language text strings.
ontology (oats.annotation.Ontology): Ontology object with specified terms.
threshold (float, optional): Value ranging from 0 to 1, the similarity threshold for string matches.
fixcase (int, optional): Set to 1 to normalize all strings before matching, set to 0 to ignore this option.
local (int, optional): Set the alignment method, 0 for global and 1 for local. Local alignment should
always be used for annotating ontology terms to long strings of text.
Returns:
dict of int:list of str: Mapping from unique integer IDs to lists of ontology term IDs.
"""
annotations = defaultdict(list)
for identifier,description in ids_to_texts.items():
annotations[identifier].extend([])
for word, term_list in ontology.token_to_terms.items():
if fixcase==1:
word = word.lower()
description = description.lower()
if binary_fuzzy_match(word, description, threshold, local):
annotations[identifier].extend(term_list)
return(annotations)
def annotate_using_noble_coder(ids_to_texts, jar_path, ontology_name, precise=1, output=None):
"""
Build a dictionary of annotations using NOBLE Coder (Tseytlin et al., 2016).
Args:
ids_to_texts (dict of int:str): Mapping from unique integer IDs to natural language text strings.
jar_path (str): Path of the NOBLE Coder jar file.
        ontology_name (str): Name of the ontology (e.g., "pato", "po") used to find a matching NOBLE Coder
terminology file (e.g., pato.term, po.term) in ~/.noble/terminologies. This name is not case-sensitive.
precise (int, optional): Set to 1 to do precise matching, set to 0 to accept partial matches.
output (str, optional): Path to a text file where the stdout from running NOBLE Coder should be
redirected. If not provided, this output is redirected to a temporary file and deleted.
Returns:
dict of int:list of str: Mapping from unique integer IDs to lists of ontology term IDs.
Raises:
FileNotFoundError: NOBLE Coder cannot find the terminology file matching this ontology.
"""
# Configuration for running the NOBLE Coder script.
tempfiles_directory = "temp_textfiles"
output_directory = "temp_output"
if not os.path.exists(tempfiles_directory):
os.makedirs(tempfiles_directory)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
default_results_filename = "RESULTS.tsv"
default_results_path = os.path.join(output_directory,default_results_filename)
if precise == 1:
specificity = "precise-match"
else:
specificity = "partial-match"
# Generate temporary text files for each of the text descriptions.
# Identifiers for descriptions are encoded into the filenames themselves.
annotations = {identifier:[] for identifier in ids_to_texts.keys()}
for identifier,description in ids_to_texts.items():
tempfile_path = os.path.join(tempfiles_directory, f"{identifier}.txt")
with open(tempfile_path, "w") as file:
file.write(description)
# Use all specified ontologies to annotate each text file.
# Also NOBLE Coder will check for a terminology file matching this ontology, make sure it's there.
expected_terminology_file = os.path.expanduser(os.path.join("~",".noble", "terminologies", f"{ontology_name}.term"))
if not os.path.exists(expected_terminology_file):
raise FileNotFoundError(expected_terminology_file)
if output is not None:
stdout_path = output
else:
stdout_path = os.path.join(output_directory,"nc_stdout.txt")
os.system(f"java -jar {jar_path} -terminology {ontology_name} -input {tempfiles_directory} -output {output_directory} -search '{specificity}' -score.concepts > {stdout_path}")
for identifier,term_list in _parse_noble_coder_results(default_results_path).items():
# Need to convert identifier back to an integer because it's being read from a file name.
        # NOBLE Coder reports every occurrence of a match, so reduce the term list to a set.
identifier = int(identifier)
term_list = list(set(term_list))
term_list = [term_id.replace("_",":") for term_id in term_list]
annotations[identifier].extend(term_list)
# Cleanup and return the annotation dictionary.
_cleanup_noble_coder_results(output_directory, tempfiles_directory)
return(annotations)
def _parse_noble_coder_results(results_filename):
"""
Translates the generated NOBLE Coder output file into a dictionary of annotations.
Args:
results_filename (str): Path of the output file created by NOBLE Coder.
Returns:
dict of int:list of str: Mapping from unique integer IDs to lists of ontology term IDs.
"""
df = pd.read_csv(results_filename, usecols=["Document", "Matched Term", "Code"], sep="\t")
annotations = defaultdict(list)
for row in df.itertuples():
textfile_processed = row[1]
identifer = str(textfile_processed.split(".")[0])
tokens_matched = row[2].split()
ontology_term_id = row[3]
annotations[identifer].append(ontology_term_id)
return(annotations)
def _cleanup_noble_coder_results(output_directory, textfiles_directory):
"""
Removes all directories and files created and used by running NOBLE Coder.
Args:
output_directory (str): Path of the directory containing the NOBLE Coder outputs.
textfiles_directory (str): Path of the directory of input text files.
"""
# Expected paths to each object that should be removed.
stdout_file = os.path.join(output_directory,"nc_stdout.txt")
html_file = os.path.join(output_directory,"index.html")
results_file = os.path.join(output_directory,"RESULTS.tsv")
properties_file = os.path.join(output_directory,"search.properties")
reports_directory = os.path.join(output_directory,"reports")
# Safely remove everything in the output directory.
if os.path.isfile(stdout_file):
os.remove(stdout_file)
if os.path.isfile(html_file):
os.remove(html_file)
if os.path.isfile(results_file):
os.remove(results_file)
if os.path.isfile(properties_file):
os.remove(properties_file)
for filepath in glob.iglob(os.path.join(reports_directory,"*.html")):
os.remove(filepath)
os.rmdir(reports_directory)
os.rmdir(output_directory)
# Safely remove everything in the text file directory.
for filepath in glob.iglob(os.path.join(textfiles_directory,"*.txt")):
os.remove(filepath)
os.rmdir(textfiles_directory)
def write_annotations_to_file(annotations_dict, annotations_output_path, sep="\t"):
"""
    Write a dictionary of annotations to a file. The produced file format of IDs followed by delimited
ontology term IDs is used as input and output formats for some other packages, so this is included
as an option for interfacing with other steps in a pipeline if necessary.
Args:
annotations_dict (dict of int:list of str): Mapping from unique integer IDs to lists of ontology term IDs.
annotations_output_file (str): Path of the output file that will be created.
"""
outfile = open(annotations_output_path,"w")
for identifer,term_list in annotations_dict.items():
row_values = [str(identifer)]
row_values.extend(term_list)
outfile.write(sep.join(row_values).strip()+"\n")
outfile.close()
def read_annotations_from_file(annotations_input_path, sep="\t"):
"""
Read a file of annotations and produce a dictionary. This is intended to be able to read the types
of files that are produced by the functions that write dictionaries of annotations to files. This
does the reverse process of producing a dictionary from those files.
Args:
annotations_input_file (str): Path of the input annotations file to read.
Returns:
dict of int:list of str: Mapping from unique integer IDs to lists of ontology term IDs.
"""
infile = open(annotations_input_path, "r")
annotations_dict = {}
    for line in infile.readlines():
row_values = line.strip().split(sep)
identifier = row_values[0]
term_ids = row_values[1:len(row_values)]
        annotations_dict[identifier] = term_ids
    infile.close()
    return(annotations_dict)
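# --- Illustrative sketch (not part of the original file) ---
# Round-trip of the two helpers above: write an annotation dictionary to a delimited file and
# read it back (note the keys come back as strings). Path and term IDs are demo assumptions.
def _demo_annotation_roundtrip(path="annotations_demo.tsv"):
    annotations = {1: ["PO:0009005", "PATO:0000574"], 2: ["PATO:0001997"]}
    write_annotations_to_file(annotations, path)
    return read_annotations_from_file(path)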
def _get_term_name(i, ontology):
""" Small helper function for the function below.
"""
try:
return(ontology[i].name)
except:
return("")
def term_enrichment(all_ids_to_annotations, group_ids, ontology, inherited=False):
""" Obtain a dataframe with the results of a term enrichment analysis using Fisher exact test with the results sorted by p-value.
Args:
all_ids_to_annotations (dict of int:list of str): A mapping between unique integer IDs (for genes) and list of ontology term IDs annotated to them.
group_ids (list of int): The IDs which should be a subset of the dictionary argument that refer to those belonging to the group to be tested.
        ontology (oats.annotation.ontology.Ontology): An ontology object that should match the ontology from which the annotations are drawn.
        inherited (bool, optional): By default this is false to indicate that the lists of ontology term IDs have not already been pre-populated to include the terms that are
superclasses of the terms annotated to that given ID. Set to true to indicate that these superclasses are already accounted for and the process of inheriting additional
terms should be skipped.
Returns:
pandas.DataFrame: A dataframe sorted by p-value that contains the results of the enrichment analysis with one row per ontology term.
"""
# If it has not already been performed for this data, using the ontology structure to inherit additional terms from these annotations.
if inherited:
all_ids_to_inherited_annotations = all_ids_to_annotations
else:
all_ids_to_inherited_annotations = {i:ontology.inherited(terms) for i,terms in all_ids_to_annotations.items()}
# Find the list of all the unique ontology term IDs that appear anywhere in the annotations.
unique_term_ids = list(set(flatten(all_ids_to_inherited_annotations.values())))
# For each term, determine the total number of (gene) IDs that it is annotated to.
num_ids_annot_with_term_t = lambda t,id_to_terms: [(t in terms) for i,terms in id_to_terms.items()].count(True)
term_id_to_gene_count = {t:num_ids_annot_with_term_t(t,all_ids_to_inherited_annotations) for t in unique_term_ids}
total_num_of_genes = len(all_ids_to_inherited_annotations)
df = | pd.DataFrame(unique_term_ids, columns=["term_id"]) | pandas.DataFrame |
from flask import Flask, render_template, request
from os import getenv
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from sklearn.neighbors import NearestNeighbors
import pickle
import numpy as np
import json
import pandas as pd
import plotly
import plotly.graph_objects as go
with open("base_model", "rb") as f:
model = pickle.load(f)
# initializes our app
app = Flask(__name__)
# Get API keys from .env
cid = getenv("CLIENT_ID")
secret = getenv("CLIENT_SECRET")
client_credentials_manager = SpotifyClientCredentials(
client_id=cid, client_secret=secret
)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
@app.route("/")
def root():
'''Root bage'''
return render_template("main.html", title="Spotify4")
@app.route("/about-the-team")
def abouttheteam():
'''About The Team - page'''
return render_template("about-the-team.html", title="Spotify4")
@app.route("/how-it-works")
def howitworks():
'''How It Works - page'''
return render_template("how-it-works.html", title="Spotify4")
@app.route("/analyze", methods=["POST"])
def analyze():
'''Where the magic happens'''
input_url = request.values["song_link"]
# Get audio features for supplied URL
analyze_track = sp.audio_features(input_url)[0]
analyze_track = pd.DataFrame(
{
"id": [analyze_track["id"]],
"acousticness": [analyze_track["acousticness"]],
"danceability": [analyze_track["danceability"]],
"duration_ms": [analyze_track["duration_ms"]],
"energy": [analyze_track["energy"]],
"instrumentalness": [analyze_track["instrumentalness"]],
"key": [analyze_track["key"]],
"liveness": [analyze_track["liveness"]],
"loudness": [analyze_track["loudness"]],
"mode": [analyze_track["mode"]],
"speechiness": [analyze_track["speechiness"]],
"tempo": [analyze_track["tempo"]],
"time_signature": [analyze_track["time_signature"]],
}
)
analyze_track.set_index("id", inplace=True)
# Model variable assignment
_, neighbors_indexes = model.kneighbors(analyze_track)
Y = | pd.read_csv("indexes") | pandas.read_csv |
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
from src.spider.QQZoneSpider import QQZoneSpider
from src.util.util import get_full_time_from_mktime
import math
import threading
import pandas as pd
import json
import re
import time
"""
QQ Zone (Qzone) lottery mini-program.
Pick a specific post and randomly draw winners from the people who liked or commented on it.
"""
class winClient(object):
like_list = []
cmt_list = []
def __init__(self):
self.sp = QQZoneSpider(use_redis=False, debug=False, from_client=True, mood_num=20)
warm_tip = "****************************************\n" \
"**************QQ空间抽奖小程序***************\n" \
"****************************************"
self.output(warm_tip)
self.output("请输入获取最近访客的时间间隔,默认为60秒")
time_step = input()
try:
time_step = int(time_step)
except:
time_step = 60
visit_file_name = "最近访客" + str(int(time.time())) + ".xlsx"
visit_file_name.replace(" ", "")
self.output("最近访客文件名:" + visit_file_name)
try:
self.sp.login_with_qr_code()
self.output("用户" + self.sp.username + "登陆成功!")
except BaseException:
self.output("用户登陆失败!请检查网络连接或稍后再试!")
os._exit(1)
visit_t = threading.Thread(target=self.sp.parse_recent_visit, args=[visit_file_name, time_step])
visit_t.start()
self.output("正在获取最近的说说...")
url_mood = self.sp.get_mood_url()
url_mood = url_mood + '&uin=' + str(self.sp.username)
self.content_list = self.get_content_list(url_mood, 0)
self.content_list += self.get_content_list(url_mood, 20)
self.content_list += self.get_content_list(url_mood, 40)
self.output('------------------------')
self.output("最近的60条说说:")
for item in self.content_list:
content = item['content']
if len(content) > 20:
content = content[0:20] + '。。。'
item_str = '|' + str(item['order']) + '|' + content
self.output(item_str)
while True:
try:
self.output('------------------------')
self.output("**以下输入请全部只输入数字!按回车键结束输入!**")
self.output("**输入Q退出本程序**")
is_digit = False
while not is_digit:
self.output('请输入您要选择的说说序号:')
mood_order = input()
is_digit = self.check_input(mood_order)
mood_order = int(mood_order)
if mood_order > len(self.content_list):
pos = (math.ceil(mood_order / 20) - 1) * 20
mood_order = mood_order % 20
t1 = threading.Thread(target=self.get_all, args=(url_mood, pos, mood_order))
t1.setDaemon(True)
t1.start()
else:
unikey = self.content_list[mood_order - 1]
key = unikey['unikey']
tid = unikey['tid']
t2 = threading.Thread(target=self.start_like_cmt_thread, args=(key, tid))
t2.setDaemon(True)
t2.start()
is_digit = False
while not is_digit:
self.output('请输入抽奖的类型,1-点赞;2-评论;(其它)-我全都要! :')
type = input()
is_digit = self.check_input(type)
is_digit = False
while not is_digit:
self.output('请选择抽奖的用户数量:')
user_num = input()
is_digit = self.check_input(user_num)
                # Wait for the worker threads to finish
try:
t1.join()
t2.join()
self.like_t.join()
self.cmt_t.join()
except:
pass
self.type = int(type)
self.user_num = int(user_num)
self.file_name = self.content_list[mood_order - 1]['content']
if len(self.file_name) > 10:
self.file_name = self.file_name[:10]
self.file_name = re.sub('[^\w\u4e00-\u9fff]+', '', self.file_name)
if len(self.file_name) <= 0:
self.file_name = str(mood_order)
print("说说:", self.file_name)
self.do_raffle()
cmt_df = pd.DataFrame(self.cmt_list)
like_df = | pd.DataFrame(self.like_list) | pandas.DataFrame |
import json
import pandas as pd
class Datasource:
"""A class that represents a Datasmoothie datasource.
This is used
to make it easy to upload data, update data and do any operation
the user needs to do on a dataset.
Parameters
----------
client : datasmoothie.Client
The client that will interface with the API.
name : string
Name of the Datasource.
primaryKey : integer
The identifier for the datasource.
Attributes
----------
_name : string
Name of the datasource.
_client : datasmoothie.Client
The client that will interface with the API.
_pk : integer
Identifier of the datasource in Datasmoothie.
"""
def __init__(self, client, meta, primary_key):
"""Initialise a Datasource.
Parameters
----------
client : A Datasmoothie python client.
        A Datasmoothie python client that has a valid api token.
name : string
Datasource name.
primaryKey : type
The primary key of the Datasource in Datasmoothie.
"""
self.survey_meta = {}
self.survey_data = ""
self.meta = meta
        self._name = self.name = meta['name']
self._client = client
self._pk = primary_key
def deserialize_dataframe(self, data, index, columns,
multi_index = True, multi_columns = True):
""" Deserializes a dataframe that was serialized with orient='split'
"""
if multi_index:
index = pd.MultiIndex.from_tuples(index)
if multi_columns:
columns = pd.MultiIndex.from_tuples(columns)
return pd.DataFrame(data=data, index=index, columns=columns)
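    # --- Illustrative sketch (not part of the original class) ---
    # The API serialises tables in pandas' orient='split' layout with tuple multi-indexes;
    # the helper above rebuilds the original DataFrame. The payload below is a demo assumption.
    def _demo_deserialize(self):
        payload = {"data": [[10, 12]],
                   "index": [("q1", "counts")],
                   "columns": [("gender", "male"), ("gender", "female")]}
        return self.deserialize_dataframe(payload["data"], payload["index"], payload["columns"])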
def get_id(self):
"""Get the id of this datasource.
The id is needed when charts are added to reports.
Returns
-------
int
The unique id number identifying this datasource in Datasmoothie.
"""
return self._pk
def name(self):
"""Get name of datasource.
Returns
-------
type
Datasource name.
"""
return self._name
def get_meta(self):
"""Get survey meta data.
Returns
-------
json
Meta data for the survey. Includes question labels etc.
"""
resp = self._client.get_request('datasource/{}'.format(self._pk),
'meta')
return resp
def get_variables(self, type=None):
"""Get a list of the variables in the datasource.
Parameters
----------
type : string
Get only a certain type of variable (single, int, float, delimited set).
Returns
-------
list
A list of variable names.
"""
try:
result = []
for variable in list(self.survey_meta['columns'].keys()):
if type is not None:
if self.survey_meta['columns'][variable]['type'] == type:
result.append(variable)
else:
result.append(variable)
return result
        except Exception as e:
            raise ValueError("Datasource doesn't have any meta data yet.") from e
def get_meta_and_data(self):
"""Get meta data and data for a data source.
Returns
-------
json dict
Returns a dict with two keys, meta and data. The meta is
Quantipy meta data and the data is a csv with the response data.
"""
resp = self._client.get_request('datasource/{}'.format(self._pk),
'meta_data')
self.survey_meta = resp['meta']
self.survey_data = resp['data']
return resp
def update_meta_and_data(self, meta, data):
"""Update the remote datasource with new meta-data and data.
Parameters
----------
meta : json object
Meta data (in quantipy form).
data : string
A CSV file with the dataset's data.
Returns
-------
type
The Json object the API returned.
"""
payload = {
'meta': meta,
'data': data
}
resp = self._client.post_request('datasource/{}'.format(self._pk),
'meta_data',
data=payload
)
return resp
def get_tables(self, stub, banner, views, combine=False, language=None):
""" Calculates views for a stub/banner combination
Parameters
----------
stub : list
List of variables on the x axis
banner : list
List of variables on the y axis
views : list
            List of views to calculate.
            Unsupported views are ignored.
Returns
-------
dict
A dict that contains the views as keys
and the results as Pandas DataFrames.
"""
payload = {
'stub': stub,
'banner': banner,
'views': views
}
resp = self._client.post_request(resource='datasource/{}'.format(self._pk),
action="tables",
data=payload
)
results = {}
if resp.status_code == 200:
content = json.loads(resp.content)
for view in content['results']:
results[view] = self.deserialize_dataframe(
data=content['results'][view]['data'],
index=content['results'][view]['index'],
columns=content['results'][view]['columns']
)
            # remove invalid views
views = [i for i in views if i in results.keys()]
if 'counts' in views:
results['counts'] = results['counts'].astype(int)
if 'c%' in views:
results['c%'] = results['c%'].round(1)
# to combine % and counts, we merge them row by row
if 'c%' in views and 'counts' in views and combine:
mi_pct = results['c%'].index
mi_counts = results['counts'].index
values = results['c%'].index.levels[1]
mi_pct = mi_pct.set_levels(level=1,
levels=["{} (%)".format(i) for i in values])
mi_counts = mi_counts.set_levels(level=1,
levels=["{}".format(i) for i in values])
results['c%'].index = mi_pct
results['counts'].index = mi_counts
results['c%'] = pd.concat([results['counts'], results['c%']]).sort_index(level=0)
del results['counts']
views.remove('counts')
if combine and len(views) > 1:
combined = results[views[0]]
for view in views[1:]:
combined = pd.concat([combined, results[view]])
combined.index = self.apply_labels(combined.index)
combined.columns = self.apply_labels(combined.columns)
return combined
else:
return results
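    # --- Illustrative sketch (not part of the original class) ---
    # Typical get_tables() call: one stub/banner pair with counts and column percentages merged
    # into a single labelled DataFrame. The variable names are assumptions about the dataset.
    def _demo_get_tables(self):
        return self.get_tables(stub=["q1"], banner=["gender", "age_group"],
                               views=["counts", "c%"], combine=True)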
def get_table_set(self, stubs, banners, views, language=None):
table_set = []
for stub in stubs:
for banner in banners:
table = self.get_tables(stub,
banner,
views,
combine=True,
language=language)
table_set.append(table)
return table_set
def table_set_to_excel(self, table_set, filename):
writer = pd.ExcelWriter('{}'.format(filename), engine="xlsxwriter")
workbook = writer.book
left_format = workbook.add_format({'align':'left'})
left_format.set_align('left')
for index, table in enumerate(table_set):
table.to_excel(writer,
startrow=2,
sheet_name="Table {}".format(index))
datasheet = writer.sheets["Table {}".format(index)]
datasheet.set_column(0,0,20, left_format)
datasheet.set_column(1,1,20, left_format)
writer.save()
def get_table(self, stub, banner, view):
""" Calculates a single view for a stub/banner combination
Parameters
----------
stub : list
List of variables on the x axis
banner : list
List of variables on the y axis
views : string
A view to calculate
Returns
-------
Pandas.DataFrame OR the response obj
The resulting Pandas.DataFrame or the response object if it fails
"""
payload = {
'stub': stub,
'banner': banner,
'view': view
}
resp = self._client.post_request(resource='datasource/{}'.format(self._pk),
action="table",
data=payload
)
if resp.status_code == 200:
content = json.loads(resp.content)
return self.deserialize_dataframe(data=content['data'],
index=content['index'],
columns=content['columns'])
else:
return resp
def get_crosstab(self, stub, banner):
""" Calculates a single crosstab view for a stub/banner combination
Parameters
----------
stub : list
List of variables on the x axis
banner : list
List of variables on the y axis
views : string
A view to calculate
Returns
-------
Pandas.DataFrame OR the response obj
The resulting Pandas.DataFrame or the response object if it fails
"""
payload = {
'stub': stub,
'banner': banner
}
resp = self._client.post_request(resource='datasource/{}'.format(self._pk),
action="crosstab",
data=payload
)
if resp.status_code == 200:
content = json.loads(resp.content)
return self.deserialize_dataframe(data=content['data'],
index=content['index'],
columns=content['columns'])
else:
return resp
def get_survey_meta(self):
if self.survey_meta == {}:
resp = self.get_meta_and_data()
return resp['meta']
else:
return self.survey_meta
def apply_labels(self, index, text_key=None):
if text_key is None:
text_key = self.get_survey_meta()['lib']['default text']
tuple_list = index.to_native_types()
new_list = []
for t in tuple_list:
code = t[1]
variable = t[0]
value_map = self.get_values(variable)
try:
code = int(code)
except Exception as e:
pass
if code in value_map.keys():
value = value_map[code]
else:
                if '%' in str(code):
value = '%'
else:
value = code
variable = self.text(variable)
new_list.append((variable, value))
return | pd.MultiIndex.from_tuples(new_list, names=["Questions", "Values"]) | pandas.MultiIndex.from_tuples |
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized. Solar and
# Solar Project were not classified, as these do not indicate whether they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_kind strings to create
# a dictionary of plant kind lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_kind
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Initiative with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (without the .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
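"""tuple: A tuple containing the FERC Form 714 tables included in PUDL.
"""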
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
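"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""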
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317',
 'Steam production: Asset retirement costs for steam production plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major only)'),
(23, '325',
 'Nuclear production: Miscellaneous power plant equipment (Major only)'),
(24, '326',
 'Nuclear production: Asset retirement costs for nuclear production plant'
 ' (Major only)'),
(25, 'subtotal_nuclear_production', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337',
 'Hydraulic production: Asset retirement costs for hydraulic production'
 ' plant'),
(35, 'subtotal_hydraulic_production',
 'Subtotal: Hydraulic Production Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347',
 'Other production: Asset retirement costs for other production plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1',
 'Transmission: Asset retirement costs for transmission plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385',
 'Regional transmission: Miscellaneous Regional Transmission and Market'
 ' Operation Plant'),
(83, '386',
 'Regional transmission: Asset Retirement Costs for Regional Transmission'
 ' and Market Operation Plant'),
(84, 'subtotal_regional_transmission',
 'Subtotal: Transmission and Market Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke, Other Gas.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes for reading the CEMS CSVs.
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary mapping EPA IPM table names (keys) to dictionaries of
pandas.read_excel() keyword arguments used to read those tables into PUDL
(values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue tables that link EIA and FERC plant and utility
entities within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of integer-type column
names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
        'nox_mass_measurement_code': pd.StringDtype(),
from energyOptimal.powerModel import powerModel
from energyOptimal.performanceModel import performanceModel
from energyOptimal.energyModel import energyModel
from energyOptimal.monitor import monitorProcess
from energyOptimal.dvfsModel import dvfsModel
import _pickle as pickle
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# plt.style.use('seaborn')
arg_dict= {"black":1, "canneal":4, "dedup":6,
"ferret":0, "fluid":1, "freq":1,
"rtview":7, "swap":3, "vips":1,
"x264":23, "xhpl":1, "openmc":0,
"body":2}
def createPowerModels(profile_path= "data/power_model/", output_path="data/models/power_model/", appname=None):
import numpy as np
for p in os.listdir(profile_path):
print(p)
pw_model= powerModel()
pw_model.loadData(filename=profile_path+p,verbose=1,freqs_filter=np.arange(1.2,2.3,0.1))
pw_model.fit()
pickle.dump(pw_model, open(output_path+p,"wb"))
error= pw_model.error()
print("Power model constants, ", pw_model.power_model_c)
print("Error, ", error)
def createPerformanceModels(profile_path= "data/performance_model/", output_path="data/models/performance_model/", appname=None):
for p in os.listdir(profile_path): #zip(parsecapps,parsecapps_argnum):
if not p.endswith("pkl"): continue
# if "freq" not in p: continue
# if "canneal" not in p: continue
if p in os.listdir(output_path): continue
print(p)
idx= -1
for k,v in arg_dict.items():
if k in p:
idx= v
break
if idx == -1:
raise("Program arg not found")
if appname and not appname in p:
continue
perf_model= performanceModel()
df= perf_model.loadData(filename=profile_path+p, arg_num=idx, verbose=1, method='constTime')
print("Inputs: {} Freqs: {} Thrs: {}".format(len(df["in"].unique()), len(df["freq"].unique()), len(df["thr"].unique())))
print("Total ", len(df))
# print("Inputs : ", df["in"].unique())
# print("Threads : ", df["thr"].unique())
# print("Frequencies : ", df["freq"].unique())
# print("")
# continue
if 'fluid' in p:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in p:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
        if len(df['in_cat'].unique()) > 5: #limit to 5 inputs
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.fit(C_=10e3,gamma_=0.1)
# scores= perf_model.crossValidate(method='mpe')
pickle.dump(perf_model, open(output_path+p,"wb"))
# print("Program", p)
# print(df.head(5))
print("MPE ", perf_model.error(method='mpe')*100)
print("MAE ", perf_model.error(method='mae'))
# print("CrossValidation ", np.mean(scores)*100, scores)
def figures(appname=None, energy= True, in_cmp=3):
from energyOptimal import plotData
for app, title in zip(parsec_models,titles):
if (appname and not appname in app) or (not app):
continue
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/"+app,"rb"))
en_model= energyModel(pw_model,perf_model,freq_range_=np.arange(1.2e6,2.3e6,0.1e6)/1e6)
plotData.setProps(xlabel='Frequencies (GHz)', ylabel='Active threads',
zlabel='Energy (kJ)' if energy else 'Time (s)', title=title)
df_= perf_model.dataFrame[perf_model.dataFrame['in_cat']==in_cmp].sort_values(['freq','thr'])
df_pred_= en_model.dataFrame[en_model.dataFrame['in_cat']==in_cmp].sort_values(['freq','thr'])
# df_pred_= df_pred_[df_pred_['thr'].isin(list(range(8,33,2)))]
# df_= df_[df_['thr'].isin(list(range(8,33,2)))]
plotData.plot3D(x=df_['freq'].unique(),y=df_['thr'].unique(),
z=df_['energy'].values/1e3 if energy else df_['time'].values,
points=True,legend='Measurements')
plotData.plot3D(x=df_pred_['freq'].unique(),y=df_pred_['thr'].unique(),
z=df_pred_['energy_model'].values/1e3 if energy else df_pred_['time'].values,
points=False,legend='Model')
plotData.ax.view_init(30,60)
if 'HPL' in app:
plotData.ax.set_zlim(0,15)
aux= 'energy' if energy else 'time'
plotData.savePlot('fotos/{}/{}.png'.format(aux, app),showLegend=True)
def createReducedPerformanceModel(path, arg_num, title_='', save_df='', save_svr=''):
perf_model= performanceModel()
perf_model.loadData(filename=path, arg_num=int(arg_num))
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq']!=2.3]
if 'fluid' in path:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in path:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
df_ori= perf_model.dataFrame.sort_values(['freq','thr','in_cat']).copy()
x=[]
y_time=[]
y_en=[]
less_5= 0
for train_sz in range(0, perf_model.dataFrame.shape[0], 100)[1:]:
# print("Program", path)
aux= perf_model.fit(C_=10e3,gamma_=0.1,train_size_=train_sz,dataframe=True)
aux= pd.merge(aux[['freq','thr','in_cat']],df_ori)
perf_model.estimate(df_ori[['freq','thr','in_cat']],dataframe=True).sort_values(['freq','thr','in_cat'])
x.append(train_sz)
y_time.append(perf_model.error()*100)
y_en.append( aux['energy'].sum()/1e6 )
# print(y_en[-1])
# print( x[-1], y_time[-1] )
if y_time[-1] <= 6 and less_5 == 0:
less_5= y_time[-1]
print('%s_%i.pkl'%(title_,train_sz))
pickle.dump(perf_model, open("data/model/performance_model/%s_%i.pkl"%(title_,train_sz),"wb"))
break
# scores= perf_model.crossValidate(method='mpe')
# print("CrossValidation ", np.mean(scores)*100, scores)
fig, ax1 = plt.subplots()
ax1.plot(x,y_time)
# ax1.plot([min(x),max(x)],[less_5, less_5],'-')
ax1.set_ylabel('Mean error (%)')
ax2 = ax1.twinx()
ax2.plot(x,y_en)
ax2.set_ylabel('Energy (KJ)')
plt.xlabel('Train size')
plt.title(title_)
plt.savefig('fotos/over/%s.png'%title_)
# plt.show()
def mean_df():
def avg_ondemand(onds, arg):
ond= dvfsModel()
ond.loadData(filename= 'data/dvfs/ondemand/'+onds[0], arg_num= arg, method='constTime')
df= ond.dataFrame
for f in onds[1:]:
            ond.loadData(filename= 'data/dvfs/ondemand/'+f, arg_num= arg, method='constTime')
df['energy']+= ond.dataFrame['energy']
df['energy']/=len(onds)
return df
ondemand= avg_ondemand(['ferret_completo_2.pkl','ferret_completo_3.pkl'],6)
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/completo_ferret_3.pkl","rb"))
en_model= energyModel(pw_model,perf_model)
ond= ondemand[['in','thr','time','energy']].sort_values(['in','thr'])
nthreads= ond['thr'].unique().shape[0]
ond= pd.crosstab(ond['in'], ond['thr'], ond['energy'],aggfunc=min)
df= en_model.realMinimalEnergy().sort_values('in_cat')['energy']
df= pd.concat([df]*nthreads,axis=1)
ond= pd.DataFrame(ond.values/df.values,columns=ond.columns)
ond.plot.bar()
    plt.plot([-1,6],[1,1], '--',color='k',label='proposed')
plt.title('Ferret')
plt.tight_layout()
plt.savefig('fotos/comp2/ferret.png')
def createReducedPerformanceModel2(path, arg_num, title_='', save_df='', save_svr=''):
perf_model= performanceModel()
perf_model.loadData(filename=path, arg_num=int(arg_num))
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq']<2.3]
if 'fluid' in path:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in path:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
df_ori= perf_model.dataFrame.sort_values(['freq','thr','in_cat']).copy()
ori= perf_model.dataFrame.copy()
freqs= perf_model.dataFrame['freq'].unique()
x= []
y_time= []
y_en= []
for f in range(1,len(freqs),1):
use_freq= list(freqs[:int(f)])+list(freqs[-int(f):])
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq'].isin(use_freq)]
# print(perf_model.dataFrame['freq'].unique())
aux= perf_model.fit(C_=10e3,gamma_=0.1,train_size_=0.9,dataframe=True)
aux= pd.merge(aux[['freq','thr','in_cat']],df_ori)
perf_model.dataFrame= ori.copy()
df_est= perf_model.estimate(df_ori[['freq','thr','in_cat']],dataframe=True).sort_values(['freq','thr','in_cat'])
error= sum( (abs(df_est['time']-df_ori['time']))/df_ori['time'] )/df_ori.shape[0]*100
x.append(aux.shape[0])
y_time.append(error)
y_en.append(aux['energy'].sum()/1e6)
# scores= perf_model.crossValidate(method='mpe')
print('%s_%i.pkl'%(title_,f), aux.shape, aux['energy'].sum()/1e6, error, perf_model.error()*100)
print(use_freq)
pickle.dump(perf_model, open("data/model/performance_model/%s_%i.pkl"%(title_,f),"wb"))
fig, ax1 = plt.subplots()
ax1.plot(x,y_time)
ax1.set_ylabel('Mean error (%)')
ax2 = ax1.twinx()
ax2.plot(x,y_en)
ax2.set_ylabel('Energy (KJ)')
plt.xlabel('Train size')
plt.title(title_)
plt.savefig('fotos/over/%s.png'%title_)
# plt.show()
def comparation(appname=None, proposed_bar=False, relative=True, thrs_filter= []):
row=[]
for title, dvfs, model, arg in zip(titles,parsec_dvfs,parsec_models,parsecapps_argnum):
if 'freq' in model or not model:
continue
if appname and not appname in dvfs:
continue
ondemand= dvfsModel()
ondemand.loadData(filename= 'data/dvfs/ondemand/'+dvfs, arg_num= arg, method='constTime')
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/"+model,"rb"))
en_model= energyModel(pw_model,perf_model)
#TODO verify if arguments match
ond= ondemand.dataFrame[['in','thr','time','energy']]
ond= pd.merge(ond,perf_model.dataFrame[['in','in_cat']]).drop_duplicates().sort_values(['in_cat','thr'])
if thrs_filter:
ond= ond[ond['thr'].isin(thrs_filter)]
        ond_en= pd.crosstab(ond['in_cat'], ond['thr'], ond['energy'],aggfunc=min)
"""
Author: <NAME> (<EMAIL>)
Date: 2020-02-10
-----Description-----
This script provides a class and set of functions for bringing CSPP science variables into Python memory.
This is set up for recovered_cspp streams, but should also work for telemetered data.
Note that CTD, DOSTA, SPKIR, PAR, and VELPT are the only data sets that are telemetered. OPTAA and NUTNR data
packets are too large to transfer in a short surface window.
There are three general functions and one function for each CSPP data stream.
To make multiple data requests, submit each request before checking to see if the data is available.
-----Required Libraries-----
requests: For issuing and checking request status.
re: For parsing returned json for URLs that contain instrument data.
time: For pausing the script while checking a data request status.
pandas: For organizing data.
xarray: For opening remote NetCDFs.
-----Class-----
OOIM2M() <<< This is the overall class. This must prepend a function.
Example 1: url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time)
request = OOIM2M.make_request(url,user,token)
nc = OOIM2M.get_location(request)
Example 2: THIS_EXAMPLE_IS_TOO_LONG = OOIM2M()
url = THIS_EXAMPLE_IS_TOO_LONG.create_url(url)
request = THIS_EXAMPLE_IS_TOO_LONG.make_request(url,user,token)
nc = THIS_EXAMPLE_IS_TOO_LONG.get_location(request)
-----General Functions-----
url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time) <<< Function for generating a request URL for data between two datetimes. Returns a complete request URL. URL is the base request url for the data you want. Dates in YYYY-MM-DD. Times in HH:MM:SS.
request = OOIM2M.make_request(url,user,token) <<< Function for making the request from the URL created from create_url. User and token are found in your account information on OOINet. Returns a requests object.
nc = OOIM2M.get_location(request) <<< Function that gathers the remote locations of the requested data. Returns a list of URLs where the data is stored as netCDFs. This list includes data that is used in the creation of data products. Example: CTD data accompanies DOSTA data.
-----Instrument Functions-----
ctd = cspp_ctd(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, salinity, and density.
dosta = cspp_dosta(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, concentration, and estimated saturation. CTD data is also made available.
flort = cspp_flort(nc) <<< Returns a pandas dataframe that contains datetime, pressure, chlorophyll-a, CDOM, and optical backscatter.
nutnr = cspp_nutnr(nc) <<< Interpolates pressure for nitrate data using time and CTD pressure. Returns a pandas dataframe that contains datetime, pressure, and nitrate.
par = cspp_parad(nc) <<< Returns a pandas dataframe that contains datetime, pressure, bulk photosynthetically active radiation.
velpt = cspp_velpt(nc) <<< Returns a pandas dataframe that contains datetime, pressure, northward velocity, eastward velocity, upward velocity, heading, pitch, roll, soundspeed, and temperature measured by the aquadopp.
batt1, batt2 = cspp_batts(nc) <<< Returns two pandas dataframes that contain datetime and voltage for each CSPP battery.
compass = cspp_cpass(nc) <<< Returns a pandas dataframe that contains datetime, pressure, heading, pitch, and roll from the control can.
sbe50 = cspp_sbe50(nc) <<< Returns a pandas dataframe that contains datetime, pressure, and profiler velocity calculated from the SBE50 in the control can.
winch = cspp_winch(nc) <<< Returns a pandas dataframe that contains datetime, pressure, internal temperature of the winch, current seen by the winch, voltage seen by the winch, and the rope on the winch drum.
cspp_spkir(nc) <<< Under development.
cspp_optaa(nc) <<< Under development.
-----Extra Functions-----
find_site(nc) <<< Function that identifies the requested CSPP site and standard depth of that site. Used in removing bad pressure data. Called by data functions. Not generally called by the user.
-----Notes/Issues-----
Flort_sample is the stream name for CSPP fluorometer data.
However, when requests are made for this stream, only deployments 5 and greater are returned.
For deployments 1-4, the current stream is flort_dj_cspp_instrument_recovered.
OOI personnel are working to make flort_sample the stream that contains all data from all deployments.
NUTNR data does not have pressure data associated with it in the raw files produces by the CSPP.
The function provided in this script interpolates based on time.
Alternatively, the user can call the int_ctd_pressure variable.
The cspp_optaa function is in the works.
OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
    The simple fix for now is to multiply the returned velocity values by 1000 to get them back into m/s.
"""
import requests, re, time, pandas as pd, numpy as np, xarray as xr
#CE01ISSP URLs
CE01ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE01ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE01ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE01ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE01ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE01ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE01ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE01ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE01ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE01ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE01ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE01ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE02SHSP URLs
CE02SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE02SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE02SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE02SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE02SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE02SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE02SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE02SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE02SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE02SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE02SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE02SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE06ISSP URLs
CE06ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE06ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE06ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE06ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE06ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE06ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE06ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE06ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE06ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE06ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE06ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE06ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE07SHSP URLs
CE07SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE07SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE07SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE07SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE07SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE07SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE07SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE07SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE07SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE07SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE07SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE07SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
class OOIM2M():
def __init__(self):
return
def create_url(url,start_date = '2014-04-04',start_time = '00:00:00',stop_date = '2035-12-31',stop_time = '23:59:59'): #Create a request URL.
timestring = "?beginDT=" + start_date + 'T' + start_time + ".000Z&endDT=" + stop_date + 'T' + stop_time + '.999Z' #Get the timespan into an OOI M2M format.
m2m_url = url + timestring #Combine the partial URL with the timespan to get a full url.
return m2m_url
def make_request(m2m_url, user ='OOIAPI-BCJPAYP2KUVXFX', token = '<KEY>O'): #Request data from UFRAME using the generated request URL.
request = requests.get(m2m_url,auth = (user,token))
if request.status_code == requests.codes.ok: #If the response is 200, then continue.
print('Request successful.')
return request
elif request.status_code == requests.codes.bad: #If the response is 400, then issue a warning to force the user to find an issue.
print(request)
print('Bad request. Check request URL, user, and token.')
return
elif request.status_code == requests.codes.not_found: #If the response is 404, there might not be data during the prescribed time period.
print(request)
print('Not found. There may be no data available during the requested time period.')
return
else: #If an error that is unusual is thrown, show this message.
print(request)
print('Unanticipated error code. Look up error code here: https://github.com/psf/requests/blob/master/requests/status_codes.py')
return
def get_location(request): #Check the status of the data request and return the remote location when complete.
data = request.json() #Return the request information as a json.
check = data['allURLs'][1] + '/status.txt' #Make a checker.
for i in range(60*30): #Given roughly half an hour...
r = requests.get(check) #check the request.
if r.status_code == requests.codes.ok: #If everything is okay.
print('Request complete.') #Print this message.
break
else:
print('Checking request...',end = " ")
print(i)
time.sleep(1) #If the request isn't complete, wait 1 second before checking again.
print("")
data_url = data['allURLs'][0] #This webpage provides all URLs for the request.
data_urls= requests.get(data_url).text #Convert the page to text.
data_nc = re.findall(r'(ooi/.*?.nc)',data_urls) #Find netCDF urls in the text.
        data_nc = [j for j in data_nc if j.endswith('.nc')] #If the URL does not end in .nc, toss it.
        kept = []
        for j in data_nc:
            try:
                float(j[-4]) #If the 4th to last character isn't a number, then toss the URL.
                kept.append(j)
            except (ValueError, IndexError):
                pass
        data_nc = kept
thredds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC/' #This is the base url for remote data access.
fill = '#fillmismatch' #Applying fill mismatch prevents issues.
data_nc = np.char.add(thredds_url,data_nc) #Combine the thredds_url and the netCDF urls.
nc = np.char.add(data_nc,fill) #Append the fill.
return nc
def find_site(nc): #Function for finding the requested site and setting the standard depth.
df = pd.DataFrame(data = {'location':nc}) #Put the remote location in a dataframe.
url = df['location'].iloc[0] #Take the first URL...
banana = url.split("-") #Split it by the dashes.
site = banana[1] #The value in the second location is the site.
if site == 'CE01ISSP': #If the site is..
depth = 25 #This is the standard deployment depth.
elif site == 'CE02SHSP':
depth = 80
elif site == 'CE06ISSP':
depth = 29
elif site == 'CE07SHSP':
depth = 87
else:
depth = 87
return site,depth #Return the site and depth for use later.
def cspp_ctd(nc):
site,depth = OOIM2M.find_site(nc)
data = pd.DataFrame() #Create a placeholder dataframe.
for remote in nc: #For each remote netcdf location
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull the following variables.
'pressure':dataset['pressure'],
'temperature':dataset['temperature'],
'salinity':dataset['salinity'],
'density':dataset['density'],
'conductivity':dataset['conductivity']})
d = pd.DataFrame(data = d) #Put the variables in a dataframe.
data = pd.concat([data,d]) #Concatenate the new dataframe with the old dataframe.
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.temperature > 0]
data = data[data.salinity > 2]
data = data[data.salinity < 42]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('CTD data for ' + site + ' available.')
print('CTD datetime in UTC.')
print('CTD pressure in dbars.')
print('CTD temperature in degC.')
print('CTD salinity in PSU.')
print('CTD density in kg m^-3.')
print('CTD conductivity in S m^-1.')
return data
def cspp_dosta(nc):
site,depth = OOIM2M.find_site(nc) #Determine the CSPP site and standard depth.
dfnc = pd.DataFrame(data = {'location':nc}) #The returned NetCDFs contain both DOSTA and CTDPF files.
        dosta = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the DOSTA files (those that do not (~) contain "ctdpf_j_cspp_instrument").
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the CTD file. CTD data accompanies DOSTA data because it is used in the computation of data products.
data = pd.DataFrame()
for remote in dosta['location']: #For each DOSTA remote location.
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull out these variables.
'pressure':dataset['pressure_depth'],
'temperature':dataset['optode_temperature'],
'concentration':dataset['dissolved_oxygen'],
'estimated_saturation':dataset['estimated_oxygen_saturation']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d]) #Concatenate it with the previous loop.
data = data[data.pressure < depth] #Remove bad values.
data = data[data.pressure > 0]
data = data[data.estimated_saturation > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('DOSTA data for ' + site + ' available.')
print('DOSTA datetime in UTC.')
print('DOSTA pressure in dbars.')
print('DOSTA temperature in degC.')
print('DOSTA concentration in umol kg^-1.')
print('DOSTA estimated_saturation in %.')
return data
def cspp_flort(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
flort = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in flort['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['time'],
'pressure':dataset['pressure_depth'],
'chla':dataset['fluorometric_chlorophyll_a'],
'cdom':dataset['fluorometric_cdom'],
'obs':dataset['optical_backscatter']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.chla > 0]
data = data[data.cdom > 0]
data = data[data.obs > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('FLORT data for ' + site + ' available.')
print('FLORT datetime in UTC.')
print('FLORT pressure in dbars.')
print('FLORT chl in ug L^-1.')
print('FLORT cdom in ppb.')
print('FLORT obs in m^-1.')
return data
def cspp_par(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
par = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in par['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'par':dataset['parad_j_par_counts_output']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad pressures.
data = data[data.pressure > 0]
data = data[data.par > 0] #Remove obviously bad values.
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('PAR data for ' + site + ' available.')
print('PAR datetime in UTC.')
print('PAR pressure in dbars.')
print('PAR par in umol photons m^-2 s^-1.')
return data
def cspp_velpt(nc):
# OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
# https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
        # The simple fix now is to multiply returned velocity values by 1000 to get it back into m/s.
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
velpt = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in velpt['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'northward':dataset['velpt_j_northward_velocity'],
'eastward':dataset['velpt_j_eastward_velocity'],
'upward':dataset['velpt_j_upward_velocity'],
'heading':dataset['heading'],
'pitch':dataset['pitch'],
'roll':dataset['roll'],
'soundspeed':dataset['speed_of_sound'],
'temperature':dataset['temperature']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data.northward = data.northward * 1000
data.eastward = data.eastward * 1000
data.upward = data.upward *1000
data = data[data.roll < 90]
data = data[data.roll > -90]
data = data[data.pitch < 90]
data = data[data.pitch > -90]
data = data[data.heading < 360]
data = data[data.heading > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('VELPT data for ' + site + ' available.')
print('VELPT datetime in UTC.')
print('VELPT pressure in dbars.')
print('VELPT northward, eastward, and upward in m s^-1.')
print('VELPT heading, pitch, roll in degrees.')
print('VELPT sounds speed in m s^-1.')
print('VELPT temperature in degC.')
return data
def cspp_batts(nc): #Returns two dataframes, one for each battery.
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
batt = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in batt['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'voltage':dataset['battery_voltage_flt32'],
'battery_position':dataset['battery_number_uint8']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
batt1 = data.loc[data['battery_position'].astype('str').str.contains('1.0')]
batt2 = data.loc[data['battery_position'].astype('str').str.contains('2.0')]
batt1 = batt1.reset_index(drop=True)
batt2 = batt2.reset_index(drop=True)
print('Battery data for ' + site + ' available.')
print('Battery datetime in UTC.')
print('Battery voltage in volts.')
return batt1,batt2
def cspp_cpass(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
hmr = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in hmr['location']:
dataset = xr.open_dataset(remote)
d =({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'heading':dataset['heading'],
'pitch':dataset['pitch'],
'roll':dataset['roll']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('Compass data for ' + site + ' available.')
print('Compass datetime in UTC.')
print('Compass pressure in dbars.')
print('Compass heading, pitch, and roll in degrees.')
return data
def cspp_sbe50(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
sbe50 = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in sbe50['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'velocity':dataset['velocity_flt32']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('SBE50 data for ' + site + ' available.')
print('SBE50 datetime in UTC.')
print('SBE50 pressure in dbars.')
print('SBE50 velocity in m s^-1.')
return data
def cspp_winch(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
winch = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in winch['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'wm_temp':dataset['temperature'],
'wm_current':dataset['current_flt32'],
'wm_voltage':dataset['voltage_flt32'],
'rope_on_drum':dataset['rope_on_drum']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('Winch data for ' + site + ' available.')
print('WM datetime in UTC.')
print('WM pressure in dbars.')
print('WM wm_temp in degC.')
print('WM wm_current in amps.')
print('WM wm_voltage in volts.')
print('WM rope_on_drum in meters.')
return data
def cspp_nutnr(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
nit = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #CTD data accompanies NUTNR data. Parse out the relevant URLs.
ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
nit_data = pd.DataFrame()
for nit_remote in nit['location']: #Pull nitrate data.
nit_dataset = xr.open_dataset(nit_remote)
n = ({'timestamp':nit_dataset['profiler_timestamp'],
'nitrate':nit_dataset['salinity_corrected_nitrate']})
n = pd.DataFrame(data = n)
nit_data = pd.concat([nit_data,n],sort = False)
nit_data = nit_data.sort_values('timestamp')
nit_data = nit_data[nit_data.nitrate > 0]
ctd_data = pd.DataFrame()
for ctd_remote in ctd['location']: #Pull CTD data.
ctd_dataset = xr.open_dataset(ctd_remote)
c = ({'timestamp':ctd_dataset['profiler_timestamp'], #Pull the following variables.
'ctdpressure':ctd_dataset['pressure']})
c = pd.DataFrame(data = c)
ctd_data = pd.concat([ctd_data,c],sort = False)
ctd_data = ctd_data[ctd_data.ctdpressure < depth] #Remove obviously bad values.
ctd_data = ctd_data[ctd_data.ctdpressure > 0]
ctd_data = ctd_data.sort_values('timestamp')
combo = | pd.concat([nit_data,ctd_data],sort = True) | pandas.concat |
import numpy as np
import pandas as pd
import pickle
from scipy.spatial.distance import pdist, squareform
from sklearn.linear_model import LinearRegression, RidgeCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import scale
# ******************************************************************************
# Utility functions
# ******************************************************************************
def load_ontology(ontology_file):
""" loads an ontology pickle file """
ontology = pickle.load(open(ontology_file, 'rb'))
return ontology
# ******************************************************************************
# Utility functions for mapping
# ******************************************************************************
def run_linear(data, scores, clf=RidgeCV(fit_intercept=False)):
"""
    Run a linear regression of scaled variables onto ontological scores to create an ontological mapping
Args:
data: dataframe with variables to reconstruct as columns
scores: ontological scores
clf: linear model that returns coefs
"""
y=scale(data)
clf.fit(scores, y)
out = clf.coef_
if len(out.shape)==1:
out = out.reshape(1,-1)
out = pd.DataFrame(out, columns=scores.columns)
out.index = data.columns
return out
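# Minimal usage sketch for run_linear (toy data invented for illustration; not part of the
# original ontology pipeline). Three random variables are regressed onto two random
# "ontological" score columns, yielding a 3 x 2 coefficient dataframe.
def _example_run_linear():
    rng = np.random.RandomState(0)
    scores = pd.DataFrame(rng.randn(50, 2), columns=['factor1', 'factor2'])
    data = pd.DataFrame(rng.randn(50, 3), columns=['v1', 'v2', 'v3'])
    coefs = run_linear(data, scores)  # rows indexed by variable, columns by score
    return coefs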
def KNN_map(data, ontology, ontology_data=None, k=10, weights='distance',
distance_metric='correlation'):
"""
Maps variable into an ontology
Performs ontological mapping as described in PAPER
Args:
data: participant X variable dataset. Columns not included in the
ontology will be mapped using the rest of the (overlapping)
variables
ontology: DV x embedding (e.g. factor loadings) matrix (pandas df). Must overlap
with some variable in data
ontology_data: the data used to create the ontology (pandas df). Used to create a
distance matrix to train the KNN regressor. If ontology
data is set to None, the data is used to compute the
distances
k: passed to KNeighborsRegressor
weights: passed to KNeighborsRegressor
distance_metric: used to compute distances for KNNR
Returns:
mapping: dataframe with ontology embedding for variables not in the ontology
neighbors: dictionary with list of k tuples of (neighbor, distance) used for each variable
"""
# variables to map
tomap = list(set(data.columns) - set(ontology.index))
# contextual variables
overlap = list(set(data.columns) & set(ontology.index))
# subset/reorder data
ontology = ontology.loc[overlap]
data = data.loc[:, tomap+overlap]
# set up KNN regressor
if ontology_data is not None:
ontology_data = ontology_data.loc[:,overlap]
distances = pd.DataFrame(squareform(pdist(ontology_data.T, metric=distance_metric)),
index=ontology_data.columns,
columns=ontology_data.columns)
else:
distances = pd.DataFrame(squareform(pdist(data.loc[:,overlap].T, metric=distance_metric)),
index=overlap,
columns=overlap)
clf = KNeighborsRegressor(metric='precomputed', n_neighbors=k, weights=weights)
clf.fit(distances, ontology)
# test distances
tomap_distances = pd.DataFrame(squareform(pdist(data.T, metric=distance_metric)),
index=data.columns,
columns=data.columns)[tomap].drop(tomap).values
mapped = pd.DataFrame(clf.predict(tomap_distances.T), index=tomap,
columns=ontology.columns)
# get neighbors
neighbors = clf.kneighbors(tomap_distances.T)
neighbor_dict = {}
for i, v in enumerate(tomap):
v_neighbors = [(overlap[x], d) for d,x in zip(neighbors[0][i], neighbors[1][i])]
neighbor_dict[v] = v_neighbors
return mapped, neighbor_dict
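# Minimal usage sketch for KNN_map (toy data invented for illustration). One variable
# ('new_var') is absent from the toy ontology and is mapped onto the two factors via its
# k=2 nearest neighbours among the overlapping variables.
def _example_knn_map():
    rng = np.random.RandomState(0)
    data = pd.DataFrame(rng.randn(50, 4), columns=['v1', 'v2', 'v3', 'new_var'])
    ontology = pd.DataFrame(rng.randn(3, 2),
                            index=['v1', 'v2', 'v3'],
                            columns=['factor1', 'factor2'])
    mapped, neighbors = KNN_map(data, ontology, k=2)
    return mapped, neighbors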
# ******************************************************************************
# Utility functions to calculate factor scores a la self-regulation ontology
# ******************************************************************************
def transform_remove_skew(data, threshold=1,
positive_skewed=None,
negative_skewed=None):
data = data.copy()
if positive_skewed is None:
positive_skewed = data.skew()>threshold
if negative_skewed is None:
negative_skewed = data.skew()<-threshold
positive_subset = data.loc[:,positive_skewed]
negative_subset = data.loc[:,negative_skewed]
# transform variables
# log transform for positive skew
positive_subset = np.log(positive_subset)
successful_transforms = positive_subset.loc[:,abs(positive_subset.skew())<threshold]
dropped_vars = set(positive_subset)-set(successful_transforms)
# replace transformed variables
data.drop(positive_subset, axis=1, inplace = True)
successful_transforms.columns = [i + '.logTr' for i in successful_transforms]
print('*'*40)
print('Dropping %s positively skewed data that could not be transformed successfully:' % len(dropped_vars))
print('\n'.join(dropped_vars))
print('*'*40)
data = pd.concat([data, successful_transforms], axis = 1)
# reflected log transform for negative skew
negative_subset = np.log(negative_subset.max()+1-negative_subset)
successful_transforms = negative_subset.loc[:,abs(negative_subset.skew())<threshold]
dropped_vars = set(negative_subset)-set(successful_transforms)
# replace transformed variables
data.drop(negative_subset, axis=1, inplace = True)
successful_transforms.columns = [i + '.ReflogTr' for i in successful_transforms]
print('*'*40)
print('Dropping %s negatively skewed data that could not be transformed successfully:' % len(dropped_vars))
print('\n'.join(dropped_vars))
print('*'*40)
data = | pd.concat([data, successful_transforms], axis=1) | pandas.concat |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEquals(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
| Index([0, 1]) | pandas.core.index.Index |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 12:37:24 2018
@author: mohit
"""
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from tqdm import tqdm #to show progress
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, f1_score
#Load image
image_dir_test = 'images/plants/test/'
image_dir_train = 'images/plants/train/'
#define the range for green color
sensitivity = 30
#define final image size
image_size = 64
'''
define a function that removes the background from the image so that only the green leaves remain, then converts it to
gray scale and resizes it to a 64 x 64 image
'''
def image_transformation(imageName, sensitivity):
imagePlatCV = cv2.imread(imageName) #read image
hsvImage = cv2.cvtColor(imagePlatCV, cv2.COLOR_BGR2HSV)
#define the range for green color
lower_green = np.array([60 - sensitivity, 100, 50])
upper_green = np.array([60 + sensitivity, 255, 255])
# threshold the hsv image to get only green colors
mask = cv2.inRange(hsvImage, lower_green, upper_green)
#apply bitwise_and between mask and the original image
greenOnlyImage = cv2.bitwise_and(imagePlatCV, imagePlatCV, mask=mask)
#lets define a kernal with ones
kernel0 = np.ones((15,15), np.uint8)
#lets apply closing morphological operation
closing0 = cv2.morphologyEx(greenOnlyImage, cv2.MORPH_CLOSE, kernel0)
#convert to gray scale
grayScale = cv2.cvtColor(closing0, cv2.COLOR_BGR2GRAY)
print(grayScale.shape)
#blur the edges
blurImage = cv2.GaussianBlur(grayScale, (15,15), 0)
#resize image
resizeImage = cv2.resize(blurImage, (image_size, image_size), interpolation=cv2.INTER_AREA)
resizeImage = resizeImage/255 #normalize
resizeImage = resizeImage.reshape(64,64,1) #to make it in right dimensions for the Keras add 1 channel
print(resizeImage.shape)
return resizeImage
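'''
a minimal, illustrative call of image_transformation (kept commented out so the script flow is
unchanged); the file name below is a placeholder -- any image under image_dir_train works the same way
'''
# example_path = os.path.join(image_dir_train, 'Maize', 'example.png') # placeholder file name
# processed = image_transformation(example_path, sensitivity)
# print(processed.shape) # expected: (64, 64, 1)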
#define classes
classes = ['Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat', 'Fat Hen', 'Loose Silky-bent', 'Maize', 'Scentless Mayweed'
, 'Shepherds Purse', 'Small-flowered Cranesbill', 'Sugar beet']
'''
Data extraction: the loop below builds a list whose entries hold the image file path, the classification label (0-11) and the plant name
'''
train = [] #data list
for species_label, speciesName in enumerate(classes):
    for fileName in os.listdir(os.path.join(image_dir_train, speciesName)):
        train.append([image_dir_train + '{}/{}'.format(speciesName, fileName), species_label, speciesName])
#convert the list into dataframe using Pandas
trainigDataFrame = | pd.DataFrame(train, columns=['FilePath', 'PlantLabel', 'PlantName']) | pandas.DataFrame |
import os
import pandas as pd
class Save():
def save_metrix(self, path_to_save, metrix):
"""
        append the metrics to the CSV file at path_to_save, merging with any rows saved there earlier
"""
path_metrix = os.path.join(path_to_save)
metrix = pd.DataFrame(metrix)
if os.path.isfile(path_metrix):
metrix_0 = pd.read_csv(path_metrix)
metrix = | pd.concat([metrix_0, metrix], axis=0) | pandas.concat |
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
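# for instance, 230 valid deals with the 100-row page size used above give ceil(230 / 100) == 3 pages,
# i.e. options [{'label': 1, 'value': 0}, {'label': 2, 'value': 1}, {'label': 3, 'value': 2}]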
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
df = pd.DataFrame(accounts_history)[['amount', 'account', 'date', 'nav']]
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df['account'] = '我的'
fig = go.Figure()
if len(df) > 0:
start_nav = float(df[df['date'] == df['date'].min()].nav)
df.loc[:, 'nav'] = df['nav'] / start_nav - 1.0
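        # e.g. navs of 1.00, 1.05 and 1.10 with a starting nav of 1.00 become cumulative returns of 0.00, 0.05 and 0.10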
df.rename(columns={'nav': 'return'}, inplace=True)
df = df.drop(df[df['amount'] <= 0].index)[['account', 'date', 'return']]
start_date = df.date.min()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
name='我的',
mode='lines',
)
)
index_df = None
if index_history:
index_history = pd.DataFrame(index_history)
index_history['date'] = pd.to_datetime(index_history['date'])
if start_date is not None:
index_history = index_history[index_history['date'] >= | pd.to_datetime(start_date) | pandas.to_datetime |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader as web
from matplotlib.ticker import FuncFormatter
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from matplotlib.ticker import FuncFormatter
from pypfopt import objective_functions, base_optimizer
from scipy.stats import norm
import math
import datetime as dt
tickers = ['GOOGL','FB','AAPL','NFLX','AMZN']
Time=1440 #No of days(steps or trading days in this case)
pvalue = 1000 #portfolio value
num_of_years = 3
start_date = dt.datetime.now() - dt.timedelta(int(365.25 * num_of_years))
end_date = dt.datetime.now()
length = len(tickers)
price_data = []
for ticker in range(length):
prices = web.DataReader(tickers[ticker], start = start_date, end = end_date, data_source='yahoo')
price_data.append(prices[['Adj Close']])
df_stocks = pd.concat(price_data, axis=1)
df_stocks.columns=tickers
mu = expected_returns.mean_historical_return(df_stocks)
Sigma = risk_models.sample_cov(df_stocks)
ef = EfficientFrontier(mu, Sigma, weight_bounds=(0,1))
sharpe_pfolio=ef.max_sharpe()
sharpe_pwt=ef.clean_weights()
print(sharpe_pwt)
#VaR Calculation
ticker_rx2 = []
sh_wt = list(sharpe_pwt.values())
sh_wt=np.array(sh_wt)
for a in range(length):
ticker_rx = df_stocks[[tickers[a]]].pct_change()
ticker_rx = (ticker_rx+1).cumprod()
ticker_rx2.append(ticker_rx[[tickers[a]]])
ticker_final = pd.concat(ticker_rx2,axis=1)
#Plot graph of Cumulative/HPR of all stocks
for i, col in enumerate(ticker_final.columns):
ticker_final[col].plot()
plt.title('Cumulative Returns')
plt.xticks(rotation=80)
plt.legend(ticker_final.columns)
plt.subplots()
plt.show()
#Taking Latest Values of Return
pret = []
pre1 = []
price =[]
for x in range(length):
pret.append(ticker_final.iloc[[-1],[x]])
price.append((df_stocks.iloc[[-1],[x]]))
pre1 = pd.concat(pret,axis=1)
pre1 = np.array(pre1)
price = | pd.concat(price,axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.patheffects
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from decimal import Decimal
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.font_manager import FontProperties
from matplotlib import transforms
from scipy import stats
from scipy.spatial import distance
from scipy.cluster import hierarchy
from statsmodels.sandbox.stats import multicomp
mpl.rcParams['figure.dpi'] = 90
# ## style pre-sets
# In[4]:
NOTEBOOK_PRESET = {"style": "ticks", "font": "Helvetica", "font_scale": 1.2, "context": "notebook"}
NOTEBOOK_FONTSIZE = 10
# In[5]:
PAPER_PRESET = {"style": "ticks", "font": "Helvetica", "context": "paper",
"rc": {"font.size":8,"axes.titlesize":8,
"axes.labelsize":8, 'axes.linewidth':0.5,
"legend.fontsize":8, "xtick.labelsize":8,
"ytick.labelsize":8, "xtick.major.size": 3.0,
"ytick.major.size": 3.0, "axes.edgecolor": "black",
"xtick.major.pad": 3.0, "ytick.major.pad": 3.0}}
PAPER_FONTSIZE = 8
# ## palette pre-sets
# In[6]:
husl = sns.color_palette("husl", 9)
BETTER_TYPE_PALETTE = {"CONTROL": husl[3], "CONTROL_SNP": husl[4], "WILDTYPE": husl[5], "FLIPPED": husl[6],
"SNP": husl[7], "DELETION": husl[0], "SCRAMBLED": "lightgray", "RANDOM": "darkgray"}
# In[ ]:
TSS_CLASS_PALETTE = {"Enhancer": sns.color_palette("deep")[1],
"intergenic": sns.color_palette("deep")[2], "protein_coding": sns.color_palette("deep")[5],
"div_lnc": sns.color_palette("deep")[3], "div_pc": sns.color_palette("deep")[0]}
# In[ ]:
COLOR_DICT = {"A": "crimson", "C": "mediumblue", "G": "orange", "T": "forestgreen"}
# ## label pre-sets
# In[7]:
BETTER_TYPE_ORDER1 = ["CONTROL", "CONTROL_SNP", "WILDTYPE", "FLIPPED", "SNP", "SCRAMBLED", "RANDOM"]
BETTER_TYPE_ORDER2 = ["CONTROL", "CONTROL_SNP", "WILDTYPE", "FLIPPED", "SNP", "DELETION", "SCRAMBLED", "RANDOM"]
# In[ ]:
TSS_CLASS_ORDER = ["Enhancer", "intergenic", "div_lnc", "protein_coding", "div_pc"]
# ## class
# In[ ]:
class Scale(matplotlib.patheffects.RendererBase):
def __init__(self, sx, sy=None):
self._sx = sx
self._sy = sy
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
affine = affine.identity().scale(self._sx, self._sy)+affine
renderer.draw_path(gc, tpath, affine, rgbFace)
# ## plotting functions
# In[ ]:
def add_margin(ax,x=0.05,y=0.05):
# This will, by default, add 5% to the x and y margins. You
# can customise this using the x and y arguments when you call it.
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xmargin = (xlim[1]-xlim[0])*x
ymargin = (ylim[1]-ylim[0])*y
ax.set_xlim(xlim[0]-xmargin,xlim[1]+xmargin)
ax.set_ylim(ylim[0]-ymargin,ylim[1]+ymargin)
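# illustrative use of add_margin (commented out so importing this module has no side effects);
# the figure and data below are placeholders, not part of the original analysis
# fig, ax = plt.subplots()
# ax.plot([0, 1, 2], [0, 1, 4])
# add_margin(ax, x=0.1, y=0.05) # pad the x-limits by 10% and the y-limits by 5%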
# In[8]:
def mimic_r_boxplot(ax):
for i, patch in enumerate(ax.artists):
r, g, b, a = patch.get_facecolor()
col = (r, g, b, 1)
patch.set_facecolor((r, g, b, .5))
patch.set_edgecolor((r, g, b, 1))
# Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
# Loop over them here, and use the same colour as above
line_order = ["lower", "upper", "whisker_1", "whisker_2", "med", "fliers"]
for j in range(i*6,i*6+6):
elem = line_order[j%6]
line = ax.lines[j]
if "whisker" in elem:
line.set_visible(False)
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
if "fliers" in elem:
line.set_alpha(0.5)
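# illustrative use of mimic_r_boxplot (commented out; some_df is a placeholder DataFrame):
# draw a seaborn boxplot first, then restyle it with translucent fills and colored edges
# ax = sns.boxplot(data=some_df, x="group", y="value")
# mimic_r_boxplot(ax)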
# In[ ]:
def annotate_pval(ax, x1, x2, y, h, text_y, val, fontsize):
from decimal import Decimal
ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1, c="black", linewidth=0.5)
if val < 0.0004:
text = "{:.2e}".format(Decimal(val))
#text = "**"
elif val < 0.05:
text = "%.3f" % val
#text = "*"
else:
text = "%.2f" % val
ax.text((x1+x2)*.5, text_y, text, ha='center', va='bottom', color="black", size=fontsize)
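# illustrative call of annotate_pval (commented out; ax is assumed to be an existing axes object):
# draw a bracket between the categories at x=0 and x=1 at height 2, labelled with p=0.03
# (values below 0.0004 would be rendered in scientific notation instead)
# annotate_pval(ax, x1=0, x2=1, y=2, h=0.1, text_y=2.1, val=0.03, fontsize=PAPER_FONTSIZE)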
# In[ ]:
def neg_control_plot(df, order, palette, fontsize, cell_type, ax, figsize, ylabel, sharey, title, save, plotname):
df_sub = df[df["better_type"].isin(order)].drop_duplicates()
if ax == None:
plt.figure(figsize=figsize)
ax = sns.boxplot(data=df_sub, x="better_type", y="overall_mean", order=order, palette=palette, linewidth=1,
saturation=1, flierprops = dict(marker='o', markersize=5))
else:
sns.boxplot(data=df_sub, x="better_type", y="overall_mean", order=order, palette=palette, linewidth=1,
saturation=1, flierprops = dict(marker='o', markersize=5), ax=ax)
ax.set_xticklabels(order, rotation=30)
mimic_r_boxplot(ax)
# calc p-vals b/w dists
    rand_dist = np.asarray(df[df["better_type"] == "random"]["overall_mean"])
    scram_dist = np.asarray(df[df["better_type"] == "scrambled"]["overall_mean"])
    ctrl_dist = np.asarray(df[df["better_type"] == "control"]["overall_mean"])
    rand_dist = rand_dist[~np.isnan(rand_dist)]
    scram_dist = scram_dist[~np.isnan(scram_dist)]
    ctrl_dist = ctrl_dist[~np.isnan(ctrl_dist)]
    rand_u, rand_pval = stats.mannwhitneyu(rand_dist, ctrl_dist, alternative="two-sided", use_continuity=False)
    scram_u, scram_pval = stats.mannwhitneyu(scram_dist, ctrl_dist, alternative="two-sided", use_continuity=False)
if sharey:
ax.set_ylim((-10, 10))
ax.yaxis.set_ticks(np.arange(-10, 11, 5))
y_1 = 8
y_2 = 6
text_y_1 = 7.5
text_y_2 = 5.5
else:
        ax.set_ylim((np.min(rand_dist)-2, np.max(ctrl_dist)+3.5))
        y_1 = np.max(ctrl_dist)+1.85
        y_2 = np.max(ctrl_dist)+0.75
        text_y_1 = np.max(ctrl_dist)+1.65
        text_y_2 = np.max(ctrl_dist)+0.55
# statistical annotation
annotate_pval(ax, 0, 2, y_1, 0, text_y_1, rand_pval, fontsize)
annotate_pval(ax, 1, 2, y_2, 0, text_y_2, scram_pval, fontsize)
ax.set_ylabel(ylabel)
ax.set_xlabel("")
if title:
ax.set_title("%s" % (cell_type))
if save:
plt.savefig("%s/%s.pdf" % (figs_dir, plotname), dpi="figure", bbox_inches="tight")
# In[ ]:
def plot_activ_and_tiles(figsize, df, reps, color, palette, x_margin_percent, tss, x_tick_size, save, plotname):
fig = plt.figure(figsize=(figsize))
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1], hspace=0)
activ_ax = plt.subplot(gs[0])
tile_ax = plt.subplot(gs[1])
## plot activities ##
df["adjusted_tile_start"] = df["actual_start"] + ((df["actual_end"] - df["actual_start"])/2)
cols = list(reps)
cols.extend(["element_id", "element", "adjusted_tile_start", "combined_sig"])
df_sub = df[cols]
# sort and melt
df_sub = df_sub.sort_values(by="adjusted_tile_start")
df_melt = pd.melt(df_sub, id_vars=["element_id", "element", "adjusted_tile_start", "combined_sig"])
sns.swarmplot(data=df_melt, x="adjusted_tile_start", y="value", ax=activ_ax, color="lightslategrey", size=5,
hue="combined_sig", palette=palette)
sns.boxplot(data=df_melt, x="adjusted_tile_start", y="value", ax=activ_ax,
showcaps=False, showfliers=False, whiskerprops={'linewidth':0},
zorder=1, hue="combined_sig", palette=palette, dodge=False)
# fix boxplot colors
for i,artist in enumerate(activ_ax.artists):
# Set the linecolor on the artist to the facecolor, and set the facecolor to None
col = artist.get_facecolor()
artist.set_edgecolor(col)
artist.set_facecolor('None')
# Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
# Loop over them here, and use the same colour as above
for j in range(i*3,i*3+3):
line = activ_ax.lines[j]
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
add_margin(activ_ax, x=x_margin_percent, y=0)
activ_ax.xaxis.set_visible(False)
activ_ax.set_ylabel("MPRA activity")
activ_ax.legend_.remove()
## plot tiles ##
for i, elem_id in enumerate(df.sort_values(by="tile_number").element_id):
tile_num = df[df["element_id"] == elem_id]["tile_number"].iloc[0]
tile_start = df[df["element_id"] == elem_id]["actual_start"].iloc[0]
tile_end = df[df["element_id"] == elem_id]["actual_end"].iloc[0]
tile_strand = df[df["element_id"] == elem_id]["strand"].iloc[0]
if i % 2 == 0:
y = 0.5
else:
y = 0
tile_ax.plot((tile_start, tile_end), (y, y), color="black", linewidth=5, solid_capstyle="butt")
tile_ax.get_xaxis().get_major_formatter().set_useOffset(False)
tile_ax.get_xaxis().get_major_formatter().set_scientific(False)
tile_ax.plot((tss, tss), (0.75, 1.4), '-', color=color)
if tile_strand == "+":
tile_ax.arrow(tss, 1.4, 40, 0, fc=color, ec=color, head_width=0.45, head_length=30, linewidth=1)
else:
tile_ax.arrow(tss, 1.4, -40, 0, fc=color, ec=color, head_width=0.45, head_length=30, linewidth=1)
#tile_ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
tile_ax.set_xticks(np.arange(df.actual_start.min(), df.actual_start.max()+200, 144))
plt.setp(tile_ax.get_xticklabels(), fontsize=x_tick_size)
tile_ax.set_ylim((-0.5, 1.75))
tile_ax.yaxis.set_visible(False)
tile_ax.spines["left"].set_visible(False)
tile_ax.spines["right"].set_visible(False)
tile_ax.spines["bottom"].set_visible(False)
if save:
fig.savefig(plotname, dpi="figure", bbox_inches="tight")
plt.show()
# In[1]:
def plot_dendrogram(linkage, max_dist, title):
plt.figure(figsize=(25, 8))
dg = hierarchy.dendrogram(linkage, show_leaf_counts=True)
dists = []
for i, d, c in zip(dg['icoord'], dg['dcoord'], dg['color_list']):
x = 0.5 * sum(i[1:3])
y = d[1]
plt.plot(x, y, 'o', c=c)
if y > max_dist:
plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
textcoords='offset points',
va='top', ha='center')
dists.append(y)
plt.axhline(y=max_dist)
plt.title(title)
plt.show()
return dists
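# illustrative use of plot_dendrogram (commented out; `data` is a placeholder feature matrix):
# build a linkage matrix with scipy, then plot it and annotate every merge above distance 5
# linkage = hierarchy.linkage(data, method="ward")
# dists = plot_dendrogram(linkage, max_dist=5, title="example clustering")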
# In[ ]:
def pearsonfunc(x, y, **kws):
r, p = stats.pearsonr(x, y)
ax = plt.gca()
ax.annotate("pearson r = {:.2f}\np = {:.2e}".format(r, Decimal(p)),
xy=(.1, .9), xycoords=ax.transAxes)
def spearmanfunc(x, y, **kws):
r, p = stats.spearmanr(x, y)
ax = plt.gca()
ax.annotate("spearman r = {:.2f}\np = {:.2e}".format(r, Decimal(p)),
xy=(.1, .9), xycoords=ax.transAxes)
# In[ ]:
def plot_peaks_and_tfbs(figsize, seq_len, seq_name, cell, scores, yerrs, motif_vals, bases, plotname, save):
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(3, 1, height_ratios=[4, 3, 1], hspace=0.2)
peak_ax = plt.subplot(gs[0])
motif_ax = plt.subplot(gs[1])
# plot deletion values
xs = list(range(0, seq_len))
peak_ax.bar(xs, scores, yerr=yerrs, color="lightgray", edgecolor="gray", linewidth=0.5, ecolor="gray",
error_kw={"elinewidth": 0.75})
# labels
peak_ax.set_xlim((-0.5, seq_len))
peak_ax.set_xlabel("")
peak_ax.set_ylabel("log2(del/WT)", fontsize=5)
peak_ax.xaxis.set_visible(False)
peak_ax.set_title("filtered scores and peaks: %s (%s)" % (seq_name, cell))
# plot motif nums
xs = list(range(0, seq_len))
max_motif_val = np.nanmax(np.abs(motif_vals))
motif_ax.axhline(y=0, color="darkgrey", linewidth=0.5, linestyle="dashed")
motif_ax.plot(xs, motif_vals, color="black", linewidth=0.75, zorder=10)
# labels
motif_ax.set_xlim((-0.5, seq_len))
motif_ax.set_ylim((-max_motif_val-1, max_motif_val+1))
motif_ax.set_xlabel("nucleotide number")
motif_ax.set_ylabel(r'$\Delta$ motifs', fontsize=5)
motif_ax.xaxis.set_visible(False)
plt.show()
if save:
fig.savefig("%s.pdf" % (plotname), dpi="figure", bbox_inches="tight", transparent=True)
plt.close()
# In[ ]:
def paired_swarmplots_w_pval(n_rows, n_cols, figsize, snp_df, data_df, fontsize, figs_dir, plotname, save):
fig, axarr = plt.subplots(figsize=figsize, squeeze=False)
pal = {"ref": "grey", "alt": sns.color_palette()[2]}
median_width = 0.3
# make axes objects
axes = []
counter = 0
for r in range(n_rows):
for c in range(n_cols):
if counter < len(snp_df):
ax = plt.subplot2grid((n_rows, n_cols), (r, c))
axes.append(ax)
counter += 1
# add plots
counter = 0
for i, row in snp_df.iterrows():
ax = axes[counter]
wt_id = row.wt_id
snp_id = row.unique_id
df = data_df[data_df["unique_id"].isin([wt_id, snp_id])]
df = df.sort_values(by="wt_or_snp", ascending=False)
if not "NA" in str(row.combined_padj) and not pd.isnull(row.combined_padj):
sns.swarmplot(data=df, x="wt_or_snp", y="rep_mean", ax=ax, palette=pal)
for tick, text in zip(ax.get_xticks(), ax.get_xticklabels()):
snp = text.get_text()
# calculate the median value for all replicates of either X or Y
median_val = df[df["wt_or_snp"]==snp]["rep_mean"].median()
# plot horizontal lines across the column, centered on the tick
ax.plot([tick-median_width/2, tick+median_width/2], [median_val, median_val],
lw=2, color='k', zorder=10)
else:
sns.swarmplot(data=df, x="wt_or_snp", y="rep_mean", ax=ax, color="lightgray")
for tick, text in zip(ax.get_xticks(), ax.get_xticklabels()):
snp = text.get_text()
# calculate the median value for all replicates of either X or Y
median_val = df[df["wt_or_snp"]==snp]["rep_mean"].median()
# plot horizontal lines across the column, centered on the tick
ax.plot([tick-median_width/2, tick+median_width/2], [median_val, median_val],
lw=2, color='k', zorder=10)
if len(row.SNP) > 50:
ax.set_title("SNP: long haplotype", fontsize=fontsize)
else:
ax.set_title("SNP: %s" % row.SNP, fontsize=fontsize)
ax.set_ylim((df.rep_mean.min()-2, df.rep_mean.max()+3))
ax.set_ylabel("")
ax.set_xlabel("")
# statistical annotation
x1, x2 = 0, 1 # columns (first column: 0, see plt.xticks())
y, h, col = df["rep_mean"].max() + 0.75, 0, "black"
ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=0.5, c=col)
if not "NA" in str(row.combined_padj) and not | pd.isnull(row.combined_padj) | pandas.isnull |
################################################################################
## Imports and configurations
import sys
import os
PROJ_PATH = '.'
#PROJ_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../'))
#sys.path.append(PROJ_PATH)
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# feature selection
from sklearn.feature_selection import SelectFromModel
from rfpimp import importances as permutation_importances, plot_importances
# classifiers
from sklearn.ensemble import RandomForestClassifier
# reporting
from src.reporting.reports import reports
## configs
DATA_PATH = PROJ_PATH+'/data/DGT/central_pt/'
RAW_PATH = DATA_PATH+'raw/'
PROCESSED_PATH = DATA_PATH+'processed/'
TRAIN_DATA = RAW_PATH+'training.csv'
TEST_DATA = RAW_PATH+'testing.csv'
LABELS_PATH = RAW_PATH+'Class_legend.txt'
random_state = 0
################################################################################
## read data and preprocess
# read
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X = df_train.drop(columns='CLASS')
y = df_train['CLASS'].astype(int)
# get feature names and labels
feat_labels = list(X.columns)
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
################################################################################
## feature selection
# Split data into 40% test and 60% training
_X_tr, _X_te, _y_tr, _y_te = train_test_split(X, y, test_size=0.4,
random_state=random_state)
# Create and train a random forest classifier
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1,
random_state=random_state)
clf.fit(_X_tr, _y_tr)
# Gini Index Importance Feature Selection Method
gini_imp_feat_sel = SelectFromModel(clf, prefit=True, threshold='.8*mean')
gini_accepted = gini_imp_feat_sel.get_support()
# Permutation
imp = permutation_importances(
clf,
pd.DataFrame(_X_te, columns=feat_labels),
pd.Series(_y_te, name='CLASS')
)
permutation_accepted = (imp['Importance']>0).loc[feat_labels].values
# Keep the ones accepted with both methods
accepted_feats = (gini_accepted.astype(int)+permutation_accepted.astype(int))==2
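# for example, gini_accepted = [True, True, False] and permutation_accepted = [True, False, False]
# sum to [2, 1, 0], so accepted_feats = [True, False, False]: only features kept by both methods survive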
# save feature selection results
feat_sel_results = pd.DataFrame(
np.array([gini_accepted, permutation_accepted, accepted_feats]).T,
index=feat_labels,
columns=['Gini', 'Permutation', 'Selected']
)
feat_sel_results.to_csv(PROCESSED_PATH+'feature_selection_results.csv')
################################################################################
## test different methods using test set
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X_train = df_train.drop(columns='CLASS')
y_train = df_train['CLASS'].astype(int)
df_test = pd.read_csv(TEST_DATA).drop(columns='Unnamed: 0')
X_test = df_test.drop(columns='CLASS')
y_test = df_test['CLASS'].astype(int)
features_selected = pd.read_csv(PROCESSED_PATH+'feature_selection_results.csv')\
.rename(columns={'Unnamed: 0': 'features'}).set_index('features')
features_selected['Original'] = True
#pd.DataFrame(features_selected[features_selected].count(),
# columns=['# features used'])\
# .sort_values('# features used', ascending=False)\
# .to_csv('feature_selection_count.csv')
# get feature names and labels
feat_labels = list(X_train.columns)
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X_train)
scaler.transform(X_train.values, copy=False)
scaler.transform(X_test.values, copy=False)
scores = []
for method in features_selected.columns:
rfc = RandomForestClassifier(100, random_state=0)
features = features_selected[method]
_X_tr = X_train[features[features].index]
_y_tr = y_train.copy()
rfc.fit(_X_tr, _y_tr)
_X_te = X_test[features[features].index]
_y_te = y_test.copy()
_y_pred = rfc.predict(_X_te)
scores.append(reports(_y_te, _y_pred)[-1].rename({'Score': method}))
pd.DataFrame(features_selected[features_selected].count(),
columns=['# features used'])\
.join( | pd.concat(scores, 1) | pandas.concat |
import importlib as imp
import pandas as pd
from collections import Counter
import itertools
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from torch.utils.data import Dataset, Subset
import torch.nn as nn
import contrastive
#import netr as net
import net
MODEL_PATH = 'model.model'
use_cuda = True
to_predict = ['Survived']
onehot_cols = ['Pclass', 'Embarked', 'Sex', 'Survived', 'Surname'] # Survived
discrete_cols = [] # ['Parch']
continuous_cols = ['Fare', 'Age'] #
text_cols = [] # ['Ticket', 'Cabin', 'Name'] # 'Ticket', 'Cabin', 'Cabin']
margin = 1.0
missing_val = -1.0
'''
add a '' entry to charcounts to account for test-time chars that aren't in the training set
'''
# rawdata, charcounts, maxlens, unique_onehotvals = fetch_and_preprocess()
def fetch_and_preprocess():
data = pd.read_csv('train.csv')
data['Surname'] = data['Name'].apply(lambda x: x.split(',')[0])
charcounts = Counter('')
maxlens = {}
for c in text_cols:
data[c] = data[c].apply(lambda x: str(x).lower() if (str(x) != 'nan') else '')
charcounts += data[c].apply(Counter).sum()
maxlens[c] = data[c].apply(lambda x: len(x)).max()
unique_onehotvals = {}
for c in onehot_cols:
unique_onehotvals[c] = data[c].unique()
# data.drop('PassengerId', 1, inplace=True)
maxlens['Name'] = 20
maxlens['Ticket'] = 7
maxlens['Cabin'] = 3
return data, charcounts, maxlens, unique_onehotvals
class Dataseq(Dataset):
"""Titanic dataset."""
def __init__(self, data, charcounts, input_dict, unique_onehotvals, maxlens):
"""
Args:
data: pandas dataframe
"""
self.data = data # assignment is done with a shallow copy in python, so data is not duplicated
self.charcounts = charcounts
self.charindex = {k: i for i, k in enumerate(charcounts.keys(), 1)}
self.charindexreverse = {i: k for i, k in enumerate(charcounts.keys(), 1)}
self.input_dict = input_dict
self.unique_onehotvals = unique_onehotvals
self.maxlens = maxlens
self.onehotindex = {}
self.onehotindexreverse = {}
for k in input_dict['onehot']:
self.onehotindex[k] = {v: i for i, v in enumerate(unique_onehotvals[k])}
self.onehotindexreverse[k] = {i: v for i, v in enumerate(unique_onehotvals[k])}
self.cols = [col for dtype in self.input_dict.keys() for col in self.input_dict[dtype]]
self.scalings = {}
for k in input_dict['continuous']:
self.scalings[k] = {'min': self.data[k].min() - 0.05 * (self.data[k].max() - self.data[k].min()),
'max': self.data[k].max() + 0.05 * (self.data[k].max() - self.data[k].min())}
self.scalings[k]['mean'] = self.data[k].mean()
self.scalings[k]['std'] = self.data[k].std()
def __len__(self):
return self.data.shape[0]
def __getrawitem__(self, idx):
# sample = {col: self.data.loc[idx, col].values for dtype in self.input_dict.keys() for col in self.input_dict[dtype]}
sample = self.data.loc[idx, self.cols].to_dict()
return sample
def __getitem__(self, idx):
sample = self.__getrawitem__(idx)
# encode onehot variables
for k in self.input_dict['onehot'].keys():
sample[k] = self.onehotindex[k][sample[k]]
# encode text variables
for k in self.input_dict['text'].keys():
t = np.array([self.charindex[c] for c in sample[k]], dtype=int)[0:self.maxlens[k]]
# print(sample[k], t)
sample[k] = np.zeros(self.maxlens[k], dtype=int)
sample[k][0:t.shape[0]] = t
# scale continous variables
for k in self.input_dict['continuous']:
sample[k] = (sample[k] - self.scalings[k]['min']) / (self.scalings[k]['max'] - self.scalings[k]['min'])
# sample[k] = (sample[k] - self.scalings[k]['mean']) / self.scalings[k]['std'] / 2.5
if np.isnan(sample[k]):
sample[k] = missing_val # -np.random.rand() #
return sample
def discretize(X2d, embeddings, maxlens):
T2 = {col: X2d[col] for col in X2d.keys()}
mb_size = X2d[list(X2d.keys())[0]].size(0)
for col, embedding in embeddings.items():
n_tokens = embedding.weight.size(0)
embedding_dim = embedding.weight.size(1)
adotb = torch.matmul(X2d[col], embedding.weight.permute(1, 0))
if col in maxlens.keys(): #if col is text data
adota = torch.matmul(X2d[col].view(-1, maxlens[col], 1, embedding_dim),
X2d[col].view(-1, maxlens[col], embedding_dim, 1))
adota = adota.view(-1, maxlens[col], 1).repeat(1, 1, n_tokens)
else: #if col is not text data
adota = torch.matmul(X2d[col].view(-1, 1, embedding_dim), X2d[col].view(-1, embedding_dim, 1))
adota = adota.view(-1, 1).repeat(1, n_tokens)
bdotb = torch.bmm(embedding.weight.unsqueeze(-1).permute(0, 2, 1), embedding.weight.unsqueeze(-1)).permute(1, 2,
0)
if col in maxlens.keys():
bdotb = bdotb.repeat(mb_size, maxlens[col], 1)
else:
bdotb = bdotb.reshape(1, n_tokens).repeat(mb_size, 1)
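        # adota - 2*adotb + bdotb expands the squared distance ||x - e||^2 to every embedding vector e,
        # so the argmin below (torch.min(...)[1]) picks the nearest discrete token for each position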
dist = adota - 2 * adotb + bdotb
T2[col] = torch.min(dist, dim=len(dist.size()) - 1)[1]
# for col in continuous_cols:
# T2[col] = -1.0*torch.lt(T2[col], torch.zeros_like(T2[col])).float() + T2[col]*torch.gt(T2[col], torch.zeros_like(T2[col])).float()
return T2
def are_equal(x0):
equ = None
mb_size = x0[list(x0.keys())[0]].size(0)
for col in x0.keys():
if not (col in to_predict):
if len(x0[col].size()) == 1:
# consider the situation where missing values have been encoded with missing_val = -1
# t = (torch.eq(x0[col], x1[col])).float() + (torch.lt(x0[col], torch.zeros_like(x0[col]))).float()*(torch.lt(x1[col], torch.zeros_like(x1[col]))).float()
t0 = x0[col].float() * (torch.gt(x0[col].float(), torch.zeros_like(x0[col].float()))).float() - (
torch.lt(x0[col].float(), torch.zeros_like(x0[col].float()))).float()
t0 = t0.view(x0[col].size() + (1,))
t1 = t0.permute(0, 1).repeat(1, mb_size, 1)
t2 = t0.permute(1, 0).repeat(mb_size, 1, 1)
t = torch.mean(torch.eq(t1, t2).float().view(mb_size, mb_size, -1), -1)
else: # len(x0[col].size()) == 2:
t0 = x0[col].view(x0[col].size() + (1,))
t1 = t0.permute(0, 2, 1).repeat(1, mb_size, 1)
t2 = t0.permute(2, 0, 1).repeat(mb_size, 1, 1)
t = torch.mean(torch.eq(t1, t2).float().view(mb_size, mb_size, -1), -1)
if equ is None:
equ = torch.floor(t)
else:
equ *= torch.floor(t)
return equ
# T, X, X2, mu, mu2, mu2d, mu_tm, logvar_tm, logvar2d, logvar_tm = calc_losses(T, embeddings, enc, dec)
def calc_mus(T, embeddings, reverse_embeddings, enc, dec, mode='train'):
mb_size = T[list(T.keys())[0]].size(0)
n_targetvals = embeddings[to_predict[0]].weight.size(0)
if use_cuda:
T = {col: Variable(tt).cuda() for col, tt in T.items()}
else:
T = {col: Variable(tt) for col, tt in T.items()}
X = {}
for col, tt in T.items():
if col in embeddings.keys():
X[col] = embeddings[col](tt)
else:
X[col] = tt.float()
mu, logvar = enc({col: X[col] if not col in to_predict else 0.0 * X[col] for col in X.keys()})
#t = int(mu.size(0) / 2)
if mode == 'train':
mu = enc.reparameterize(mu, logvar)
mu_tm = torch.zeros_like(mu).unsqueeze(1).repeat((1, 1 + n_targetvals, 1))
logvar_tm = torch.zeros_like(mu).unsqueeze(1).repeat((1, 1 + n_targetvals, 1))
mu_tm[:, 0, :] = mu
logvar_tm[:, 0, :] = logvar
# encodings for all the possible target embedding values
for i in range(n_targetvals):
m, lgv = enc({col: X[col] if not col in to_predict else embeddings[col](i * torch.ones_like(T[col])) for col in
X.keys()})
if mode == 'train':
#m, lgv = enc(dec(enc.reparameterize(m, lgv)))
#m, lgv = enc(dec(m))
m = enc.reparameterize(m, lgv)
#None
if mode == 'train':
use = torch.eq(i * torch.ones_like(T[to_predict[0]]), T[to_predict[0]]).float().view(-1, 1).repeat(1, mu.size(1))
use[0:int(1*mb_size/2)] = 0.0
mu = mu + m * use - use * mu
logvar = logvar + lgv * use - use * logvar
mu_tm[:, i+1, :] = m
logvar_tm[:, i + 1, :] = lgv
X2 = dec(mu)
'''
if mode == 'train':
#mu_tm[:, 0, :] = enc.reparameterize(mu, logvar)
X2 = dec(mu_tm[:, 0, :])
else:
X2 = dec(mu)
'''
T2 = {}
X2d = {col: (1.0 * tt).detach() for col, tt in X2.items()}
'''
for col, embedding in embeddings.items():
if not (col in to_predict):
None
T2[col] = reverse_embeddings[col](X2[col])
X2d[col] = embeddings[col](T2[col].detach()) #+ 0.5*X2d[col]
else:
#None
T2[col] = reverse_embeddings[col](X2[col])
X2d[col] = embeddings[col](T2[col].detach()) #+ 0.5*X2d[col]
X2d[col] = torch.cat((0.0*X2d[col][0:int(1*mb_size/2)], X2d[col][int(1*mb_size/2):]), 0)
'''
mu2, logvar2 = enc(X2)
if mode == 'train':
mu2 = enc.reparameterize(mu2, logvar2)
mu2 = mu2.view(mb_size, -1)
mu2d, logvar2d = enc(X2d)
if mode == 'train':
mu2d = enc.reparameterize(mu2d, logvar2d)
mu2d = mu2d.view(mb_size, -1)
mu = mu.view(mb_size, -1)
return T, X, X2, mu, logvar, mu2, mu2d, mu_tm, logvar2, logvar2d, logvar_tm
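# Added note on the values returned above (a hedged reading of the code): mu/logvar come from encoding the batch
# with the target column(s) zeroed out; mu_tm stacks, per example, that masked encoding (slice 0) plus one encoding
# for each candidate value of the target variable (slices 1..n_targetvals); X2 is the decoder reconstruction, and
# mu2 / mu2d are re-encodings of the reconstruction with and without gradients flowing back through it.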
# enc_loss, enc_loss0, enc_loss1, enc_loss2, enc_loss3 = calc_losses(T, embeddings, mu, logvar, mu2, mu2d, mu_tm, logvar2, logvar2d, logvar_tm, logloss)
def calc_losses(T, embeddings, mu, logvar, mu2, mu2d, mu_tm, logvar2, logvar2d, logvar_tm, logloss, lookfordups=True):
mb_size = mu.size(0)
latent_dim = mu.size(1)
n_targetvals = mu_tm.size(1) - 1 # len(mu_tm) - 1
adotb = torch.matmul(mu, mu2.permute(1, 0)) # batch_size x batch_size
adota = torch.matmul(mu.view(-1, 1, latent_dim), mu.view(-1, latent_dim, 1)) # batch_size x 1 x 1
bdotb = torch.matmul(mu2.view(-1, 1, latent_dim), mu2.view(-1, latent_dim, 1))
diffsquares = (adota.view(-1, 1).repeat(1, mb_size) + bdotb.view(1, -1).repeat(mb_size, 1) - 2 * adotb) / latent_dim
tt = np.triu_indices(mb_size, k=1)
diffsquares = diffsquares[tt]
adotb2 = torch.matmul(mu, mu.permute(1, 0)) # batch_size x batch_size
adota2 = torch.matmul(mu.view(-1, 1, latent_dim), mu.view(-1, latent_dim, 1)) # batch_size x 1 x 1
bdotb2 = torch.matmul(mu.view(-1, 1, latent_dim), mu.view(-1, latent_dim, 1))
diffsquares2 = (adota2.view(-1, 1).repeat(1, mb_size) + bdotb2.view(1, -1).repeat(mb_size,
1) - 2 * adotb2) / latent_dim
diffsquares2 = diffsquares2[tt]
if lookfordups:
are_same = are_equal(T)
are_same = are_same[tt]
else:
are_same = torch.zeros_like(diffsquares)
# print('shapes', are_same.size())
# print('fraction same', torch.mean(are_same))
enc_loss0 = 0.25 * logloss(diffsquares, are_same, weights=1.0 - are_same)
#enc_loss0 += 0.125 * logloss(diffsquares2, are_same, weights=1.0 - are_same)
# enc_loss0 = logloss(torch.mean(torch.pow(mu[::2]-mu[1::2], 2), 1), are_same, weights=1-are_same)
enc_loss1 = 0.5 * logloss(torch.mean(torch.pow(mu - mu2, 2), 1), torch.ones(mb_size).cuda())
# enc_loss1 = 0.5 * logloss(torch.mean(torch.pow(mu_tm[:, 0, :] - mu2, 2), 1), torch.ones(mb_size).cuda())
enc_loss2 = 0.25 * logloss(torch.mean(torch.pow(mu - mu2d, 2), 1), torch.zeros(mb_size).cuda())
# enc_loss3 = 0.5 * logloss(torch.mean(torch.pow(mu - mu_tm, 2), 1), torch.ones(mb_size).cuda())
# enc_loss3 += 0.5 * logloss(torch.mean(torch.pow(mu - mu_false, 2), 1), torch.zeros(mb_size).cuda())
enc_loss3 = 0.0
for i in range(n_targetvals):
if use_cuda:
target = torch.eq(i * torch.ones_like(T[to_predict[0]]), T[to_predict[0]]).float().cuda()
else:
target = torch.eq(i * torch.ones_like(T[to_predict[0]]), T[to_predict[0]]).float()
factor = 0.5 * ((1 - target) / (n_targetvals - 1) + target)
enc_loss3 += torch.mean(
factor * logloss(torch.mean(torch.pow(mu_tm[:, 0] - mu_tm[:, i + 1], 2), 1), target,
do_batch_mean=False))
'''
#bdot is the square of the difference between the latent vectors for each of the possible (non-missing)
#values of the target variable
bdot = torch.bmm(mu_tm[:, 1:, :], mu_tm[:, 1:, :].permute(0, 2, 1)) # batch_size x n_target_vals x n_target_vals
diag = torch.diagonal(bdot, offset=0, dim1=1, dim2=2)
bdot = -2.0*bdot
bdot += diag.view(-1, n_targetvals, 1).repeat(1, 1, n_targetvals)
bdot += diag.view(-1, 1, n_targetvals).repeat(1, n_targetvals, 1)
tt = np.triu_indices(n_targetvals, k=1)
uptri_size = len(tt[0])
#calculate the upper-triangular indices batchwise
tt = (np.arange(mb_size).repeat(uptri_size ),) + (np.tile(tt[0], mb_size),) + (np.tile(tt[1], mb_size),)
bdot = bdot[tt]
#bdot = bdot[tt].view(uptri_size, mb_size).permute(1,0)
#print(bdot.size())
ltemp = 0.25*torch.mean(logloss(bdot/latent_dim, torch.zeros(uptri_size*mb_size).cuda()))
#print(ltemp)
enc_loss3 += ltemp
'''
enc_loss = 1.0*(enc_loss0 + enc_loss1 + enc_loss2) + 2.0 * enc_loss3
enc_loss += 2.0 / 64.0 * torch.mean(torch.pow(mu, 2))
enc_loss += 2.0 / 64.0 * torch.mean(torch.pow(mu2, 2))
enc_loss += 4.0 / 256.0 * torch.mean(torch.exp(logvar) - logvar) #1.0/64
enc_loss += 4.0 / 256.0 * torch.mean(torch.exp(logvar2) - logvar2)
enc_loss += 4.0 / 256.0 * torch.mean(torch.exp(logvar2d) - logvar2d)
enc_loss += 4.0 / 256.0 * torch.mean(torch.exp(logvar_tm[:, 1:]) - logvar_tm[:, 1:])
for col, emb in embeddings.items():
enc_loss += 4.0/64.0 * torch.sum(torch.mean(torch.pow(emb.weight, 2), 1)) / np.sqrt(emb.weight.size(0))
return enc_loss, enc_loss0, enc_loss1, enc_loss2, enc_loss3
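# Added summary of the loss terms above (hedged interpretation): enc_loss0 separates latent codes of distinct
# examples (duplicate pairs found by are_equal are down-weighted), enc_loss1 pulls each code towards the
# re-encoding of its own reconstruction, enc_loss2 pushes it away from the re-encoding of the detached
# reconstruction, and enc_loss3 is the supervised contrastive term over the candidate target values; the remaining
# terms act as L2 / variance regularisers on the codes, the log-variances and the embedding weights.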
#mu, mu2, target_arr = do_train(rawdata, charcounts, maxlens, unique_onehotvals)
def do_train(rawdata, charcounts, maxlens, unique_onehotvals):
n_batches = 2800
mb_size = 128
lr = 2.0e-4
momentum = 0.5
cnt = 0
latent_dim = 32 # 24#
recurrent_hidden_size = 24
epoch_len = 8
max_veclen = 0.0
patience = 12 * epoch_len
patience_duration = 0
input_dict = {}
input_dict['discrete'] = discrete_cols
input_dict['continuous'] = continuous_cols
input_dict['onehot'] = {}
for k in onehot_cols:
dim = int(np.ceil(np.log(len(unique_onehotvals[k])) / np.log(2.0)))
input_dict['onehot'][k] = dim
if len(charcounts) > 0:
text_dim = int(np.ceil(np.log(len(charcounts)) / np.log(2.0)))
input_dict['text'] = {t: text_dim for t in text_cols}
else:
text_dim = 0
input_dict['text'] = {}
data = Dataseq(rawdata, charcounts, input_dict, unique_onehotvals, maxlens)
#data_idx = np.arange(data.__len__())
data_idx = np.arange(rawdata.shape[0])
np.random.shuffle(data_idx)
n_folds = 6
fold_size = 1.0 * rawdata.shape[0] / n_folds #data.__len__() / n_folds
    folds = [data_idx[int(i * fold_size):int((i + 1) * fold_size)] for i in range(n_folds)]
fold_groups = {}
fold_groups[0] = {'train': [0, 1, 2, 3], 'val': [4]}
fold_groups[1] = {'train': [1, 2, 3, 4], 'val': [0]}
fold_groups[2] = {'train': [0, 2, 3, 4], 'val': [1]}
fold_groups[3] = {'train': [0, 1, 3, 4], 'val': [2]}
fold_groups[4] = {'train': [0, 1, 2, 4], 'val': [3]}
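    # Added note: the data is cut into n_folds = 6 equal slices, but fold_groups only rotates folds 0-4 between
    # training and validation (e.g. group 0 trains on folds 0-3 and validates on fold 4); fold 5 is presumably
    # held back as an untouched test set.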
for fold in range(1):
train_idx = np.array(list(itertools.chain.from_iterable([folds[i] for i in fold_groups[fold]['train']])))
val_idx = np.array(list(itertools.chain.from_iterable([folds[i] for i in fold_groups[fold]['val']])))
#data = Dataseq(rawdata, charcounts, input_dict, unique_onehotvals, maxlens)
train = Subset(data, train_idx)
val = Subset(data, val_idx)
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_iter = torch.utils.data.DataLoader(train, batch_size=int(mb_size / 1), shuffle=True, **kwargs)
train_iter_unshuffled = torch.utils.data.DataLoader(train, batch_size=mb_size, shuffle=False, **kwargs)
val_iter = torch.utils.data.DataLoader(val, batch_size=mb_size, shuffle=False, **kwargs)
embeddings = {}
reverse_embeddings = {}
onehot_embedding_weights = {}
for k in onehot_cols:
dim = input_dict['onehot'][k]
onehot_embedding_weights[k] = net.get_embedding_weight(len(unique_onehotvals[k]), dim, use_cuda=use_cuda)
embeddings[k] = nn.Embedding(len(unique_onehotvals[k]), dim, _weight=onehot_embedding_weights[k])
reverse_embeddings[k] = net.EmbeddingToIndex(len(unique_onehotvals[k]), dim,
_weight=onehot_embedding_weights[k])
if text_dim > 0:
text_embedding_weights = net.get_embedding_weight(len(charcounts) + 1, text_dim, use_cuda=use_cuda)
text_embedding = nn.Embedding(len(charcounts) + 1, text_dim, _weight=text_embedding_weights)
text_embeddingtoindex = net.EmbeddingToIndex(len(charcounts) + 1, text_dim, _weight=text_embedding_weights)
for k in text_cols:
embeddings[k] = text_embedding
reverse_embeddings[k] = text_embeddingtoindex
enc = net.Encoder(input_dict, dim=latent_dim, recurrent_hidden_size=recurrent_hidden_size)
dec = net.Decoder(input_dict, maxlens, dim=latent_dim, recurrent_hidden_size=recurrent_hidden_size)
if use_cuda:
embeddings = {k: embeddings[k].cuda() for k in embeddings.keys()}
enc.cuda()
dec.cuda()
logloss = contrastive.GaussianOverlap()
solver = optim.RMSprop(
[p for em in embeddings.values() for p in em.parameters()] + [p for p in enc.parameters()] + [p for p in
dec.parameters()],
lr=lr, momentum=momentum)
print('starting training')
loss = 0.0
loss0 = 0.0
loss1 = 0.0
loss2 = 0.0
loss3 = 0.0
        logger_df = pd.DataFrame(
            columns=['iter', 'train_loss', 'train_veclen', 'val_veclen', 'val_loss', 'val_acc'] +
                    [t + '_correct' for t in to_predict] + [t + '_false' for t in to_predict])
import pandas as pd
import numpy as np
import pytest
from pypfopt import expected_returns
from tests.utilities_for_tests import get_data, get_benchmark_data
def test_returns_dataframe():
df = get_data()
returns_df = expected_returns.returns_from_prices(df)
assert isinstance(returns_df, pd.DataFrame)
assert returns_df.shape[1] == 20
assert len(returns_df) == 7125
assert returns_df.index.is_all_dates
assert not ((returns_df > 1) & returns_df.notnull()).any().any()
def test_prices_from_returns():
df = get_data()
returns_df = df.pct_change() # keep NaN row
# convert pseudo-price to price
pseudo_prices = expected_returns.prices_from_returns(returns_df)
initial_prices = df.bfill().iloc[0]
test_prices = pseudo_prices * initial_prices
# check equality, robust to floating point issues
assert ((test_prices[1:] - df[1:]).fillna(0) < 1e-10).all().all()
def test_prices_from_log_returns():
df = get_data()
returns_df = df.pct_change() # keep NaN row
log_returns_df = np.log1p(returns_df)
# convert pseudo-price to price
pseudo_prices = expected_returns.prices_from_returns(
log_returns_df, log_returns=True
)
initial_prices = df.bfill().iloc[0]
test_prices = pseudo_prices * initial_prices
# check equality, robust to floating point issues
assert ((test_prices[1:] - df[1:]).fillna(0) < 1e-10).all().all()
def test_returns_from_prices():
df = get_data()
returns_df = expected_returns.returns_from_prices(df)
pd.testing.assert_series_equal(returns_df.iloc[-1], df.pct_change().iloc[-1])
def test_log_returns_from_prices():
df = get_data()
old_nan = df.isnull().sum(axis=1).sum()
log_rets = expected_returns.returns_from_prices(df, log_returns=True)
new_nan = log_rets.isnull().sum(axis=1).sum()
assert new_nan == old_nan
np.testing.assert_almost_equal(log_rets.iloc[-1, -1], 0.0001682740081102576)
def test_mean_historical_returns_dummy():
data = pd.DataFrame(
[
[4.0, 2.0, 0.6, -12],
[4.2, 2.1, 0.59, -13.2],
[3.9, 2.0, 0.58, -11.3],
[4.3, 2.1, 0.62, -11.7],
[4.1, 2.2, 0.63, -10.1],
]
)
mean = expected_returns.mean_historical_return(data, frequency=1)
test_answer = pd.Series([0.0061922, 0.0241137, 0.0122722, -0.0421775])
pd.testing.assert_series_equal(mean, test_answer, rtol=1e-3)
mean = expected_returns.mean_historical_return(data, compounding=False, frequency=1)
test_answer = pd.Series([0.0086560, 0.0250000, 0.0128697, -0.03632333])
pd.testing.assert_series_equal(mean, test_answer, rtol=1e-3)
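# Added illustrative check (not part of the original suite): verifies by hand, for a single price column, that
# compounding=False gives the arithmetic mean of the period returns while the default compounded mean equals the
# geometric mean (4.1/4.0)**(1/4) - 1. The test name is new; it only reuses call forms already present above.
def test_mean_historical_returns_dummy_manual_check():
    data = pd.DataFrame([4.0, 4.2, 3.9, 4.3, 4.1], columns=["A"])
    arithmetic = expected_returns.mean_historical_return(data, compounding=False, frequency=1)
    geometric = expected_returns.mean_historical_return(data, frequency=1)
    np.testing.assert_almost_equal(arithmetic["A"], data["A"].pct_change().mean())
    np.testing.assert_almost_equal(geometric["A"], (4.1 / 4.0) ** (1 / 4) - 1)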
def test_mean_historical_returns():
df = get_data()
mean = expected_returns.mean_historical_return(df)
assert isinstance(mean, pd.Series)
assert list(mean.index) == list(df.columns)
assert mean.notnull().all()
assert mean.dtype == "float64"
correct_mean = np.array(
[
0.247967,
0.294304,
0.284037,
0.1923164,
0.371327,
0.1360093,
0.0328503,
0.1200115,
0.105540,
0.0423457,
0.1002559,
0.1442237,
-0.0792602,
0.1430506,
0.0736356,
0.238835,
0.388665,
0.226717,
0.1561701,
0.2318153,
]
)
np.testing.assert_array_almost_equal(mean.values, correct_mean)
def test_mean_historical_returns_type_warning():
df = get_data()
mean = expected_returns.mean_historical_return(df)
with pytest.warns(RuntimeWarning) as w:
mean_from_array = expected_returns.mean_historical_return(np.array(df))
assert len(w) == 1
assert str(w[0].message) == "prices are not in a dataframe"
np.testing.assert_array_almost_equal(mean.values, mean_from_array.values, decimal=6)
def test_mean_historical_returns_frequency():
df = get_data()
mean = expected_returns.mean_historical_return(df, compounding=False)
mean2 = expected_returns.mean_historical_return(df, compounding=False, frequency=52)
np.testing.assert_array_almost_equal(mean / 252, mean2 / 52)
def test_ema_historical_return():
df = get_data()
mean = expected_returns.ema_historical_return(df)
assert isinstance(mean, pd.Series)
assert list(mean.index) == list(df.columns)
assert mean.notnull().all()
assert mean.dtype == "float64"
# Test the (warning triggering) case that input is not a dataFrame
with pytest.warns(RuntimeWarning):
mean_np = expected_returns.ema_historical_return(df.to_numpy())
mean_np.name = mean.name # These will differ.
reset_mean = mean.reset_index(drop=True) # Index labels would be tickers.
pd.testing.assert_series_equal(mean_np, reset_mean)
def test_ema_historical_return_frequency():
df = get_data()
mean = expected_returns.ema_historical_return(df, compounding=False)
mean2 = expected_returns.ema_historical_return(df, compounding=False, frequency=52)
np.testing.assert_array_almost_equal(mean / 252, mean2 / 52)
def test_ema_historical_return_limit():
df = get_data()
sma = expected_returns.mean_historical_return(df, compounding=False)
ema = expected_returns.ema_historical_return(df, compounding=False, span=1e10)
np.testing.assert_array_almost_equal(ema.values, sma.values)
def test_capm_no_benchmark():
df = get_data()
mu = expected_returns.capm_return(df)
assert isinstance(mu, pd.Series)
assert list(mu.index) == list(df.columns)
assert mu.notnull().all()
assert mu.dtype == "float64"
correct_mu = np.array(
[
0.22148462799238577,
0.2835429647498704,
0.14693081977908462,
0.1488989354304723,
0.4162399750335195,
0.22716772604184535,
0.3970337136813829,
0.16733214988182069,
0.31791477659742146,
0.17279931642386534,
0.15271750464365566,
0.351778014382922,
0.32859883451716376,
0.1501938182844417,
0.268295486802897,
0.31632339201710874,
0.27753479916328516,
0.16959588523287855,
0.3089119447773357,
0.2558719211959501,
]
)
np.testing.assert_array_almost_equal(mu.values, correct_mu)
# Test the (warning triggering) case that input is not a dataFrame
with pytest.warns(RuntimeWarning):
mu_np = expected_returns.capm_return(df.to_numpy())
mu_np.name = mu.name # These will differ.
mu_np.index = mu.index # Index labels would be tickers.
    pd.testing.assert_series_equal(mu_np, mu)
__copyright__ = 'Copyright 2017 <NAME>'
__license__ = 'Apache 2.0'
from sklearn.model_selection import train_test_split
import sklearn.metrics
import pandas as pd
from ._util import FrameworkManager
def train(model_name, params):
# Add in features
_, valid_amnt, test_amnt = FrameworkManager.train_valid_test_splits
f_train_valid, _ = train_test_split(FrameworkManager.features, test_size=test_amnt, random_state=137)
f_train, f_valid = train_test_split(f_train_valid, test_size=valid_amnt/(1-test_amnt), random_state=137)
train_X = pd.concat([FrameworkManager.train['X'], f_train], axis=1)
    validation_X = pd.concat([FrameworkManager.validation['X'], f_valid], axis=1)
import os
import time
import glob
import numpy as np
import pandas as pd
from docopt import docopt
from sklearn.preprocessing import OneHotEncoder
import metrics
import models
from dataset import utils as dataset_utils
def main():
args = docopt("""
Usage:
evaluate_models.py [options] <graphs_path>
Options:
--disassociative Input graphs are disassociative
""")
graphs_path = args["<graphs_path>"]
associative = not args["--disassociative"]
metrics_order = {
'Modularity': 0,
'Soft Overlap': 1,
'Hard Overlap': 2,
'Mutual Information': 3
}
all_models = [
models.TruePartitionModel(),
models.BetheHessianModel(associative_communities=associative),
models.KipfModularityNet(
associative=associative,
bethe_hessian_init=False,
verbose=False),
models.KipfModularityNet(
associative=associative,
bethe_hessian_init=True,
verbose=False),
models.AttentionModularityNet(
associative=associative,
bethe_hessian_init=False,
verbose=False
),
models.AttentionModularityNet(
associative=associative,
bethe_hessian_init=True,
verbose=False
)
]
if associative:
all_models.extend([
models.GreedyModularityModel(),
models.LouvainModularityModel(),
])
num_test_graphs = len(glob.glob(os.path.join(graphs_path, "adj-*.npy")))
print(f"Number of graphs found: {num_test_graphs}")
output_path = os.path.join(graphs_path, "results")
os.makedirs(output_path, exist_ok=True)
results = np.zeros([num_test_graphs, len(all_models), len(metrics_order)])
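    # Added comment: results[i, j, k] stores metric k (indexed via metrics_order) for model j (order of all_models)
    # evaluated on test graph i.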
label_encoder = OneHotEncoder(categories='auto', sparse=False)
print(time.time())
for idx in range(num_test_graphs):
if idx and idx % 10 == 0:
print(idx, time.time())
print_aggregated_results(results[:idx], metrics_order, all_models)
np.save(os.path.join(output_path, f"{idx}.npy"), results[:idx])
labels_path = os.path.join(graphs_path, f"labels-{idx}.npy")
adj_path = os.path.join(graphs_path, f"adj-{idx}.npy")
labels = np.load(labels_path, allow_pickle=True)
true_labels = label_encoder.fit_transform(labels.reshape(-1, 1))
all_models[0].true_labels = true_labels
adjacency = np.load(adj_path, allow_pickle=True).tolist()
graph_nx = dataset_utils.graph_from_adjacency(adjacency, labels)
for j, m in enumerate(all_models):
predictions = m.fit_transform(graph_nx)
model_res = metrics.compute_all(graph_nx, true_labels, predictions)
for metric, k in metrics_order.items():
results[idx, j, k] = model_res[metric] if metric is not None else 0
print_aggregated_results(results, metrics_order, all_models)
np.save(os.path.join(output_path, f"all.npy"), results)
def print_aggregated_results(results, metrics_order, all_models):
means = results.mean(axis=0)
dt_means = {metric: means[:, order] for metric, order in metrics_order.items()}
index = [type(m).__name__ for m in all_models]
    print(pd.DataFrame(dt_means, index=index))
from abc import ABC
import pandas as pd
import us
from can_tools.scrapers import CMU, variables
from can_tools.scrapers.official.base import MicrosoftBIDashboard
from can_tools.scrapers.util import flatten_dict
class PennsylvaniaCountyVaccines(MicrosoftBIDashboard):
"""
Fetch county level vaccine data from Pennsylvania's PowerBI dashboard
"""
has_location = False
location_type = "county"
state_fips = int(us.states.lookup("Pennsylvania").fips)
source = "https://www.health.pa.gov/topics/disease/coronavirus/Vaccine/Pages/Dashboard.aspx"
source_name = "Pennsylvania Department of Health"
powerbi_url = "https://wabi-us-gov-iowa-api.analysis.usgovcloudapi.net"
def construct_body(self, resource_key, ds_id, model_id, report_id):
body = {}
# Set version
body["version"] = "1.0.0"
body["cancelQueries"] = []
body["modelId"] = model_id
body["queries"] = [
{
"Query": {
"Commands": [
{
"SemanticQueryDataShapeCommand": {
"Query": {
"Version": 2,
"From": self.construct_from(
[
(
"c",
"Counts of People by County",
0,
)
]
),
"Select": self.construct_select(
[
("c", "County", "location_name"),
(
"c",
"PartiallyCovered",
"total_vaccine_initiated",
),
(
"c",
"FullyCovered",
"total_vaccine_completed",
),
],
[],
[],
),
}
}
}
]
},
"QueryId": "",
"ApplicationContext": self.construct_application_context(
ds_id, report_id
),
}
]
return body
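    # Added note (reading of construct_body above): the query selects three columns from the
    # "Counts of People by County" table and aliases them County -> location_name,
    # PartiallyCovered -> total_vaccine_initiated and FullyCovered -> total_vaccine_completed,
    # which is the naming that normalize() below relies on.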
def fetch(self):
# Get general information
self._setup_sess()
dashboard_frame = self.get_dashboard_iframe()
resource_key = self.get_resource_key(dashboard_frame)
ds_id, model_id, report_id = self.get_model_data(resource_key)
# Get the post url
url = self.powerbi_query_url()
# Build post headers
headers = self.construct_headers(resource_key)
# Build post body
body = self.construct_body(resource_key, ds_id, model_id, report_id)
res = self.sess.post(url, json=body, headers=headers)
return res.json()
def normalize(self, resjson):
# Extract components we care about from json
foo = resjson["results"][0]["result"]["data"]
descriptor = foo["descriptor"]["Select"]
data = foo["dsr"]["DS"][0]["PH"][0]["DM0"]
# Build dict of dicts with relevant info
col_mapping = {x["Value"]: x["Name"] for x in descriptor}
col_keys = list(col_mapping.keys())
# Iterate through all of the rows and store relevant data
data_rows = []
for record in data:
flat_record = flatten_dict(record)
row = {}
for k in col_keys:
flat_record_key = [frk for frk in flat_record.keys() if k in frk]
if len(flat_record_key) > 0:
row[col_mapping[k]] = flat_record[flat_record_key[0]]
data_rows.append(row)
# Dump records into a DataFrame
        df = pd.DataFrame.from_records(data_rows)
"""
Open AI Gym LunarLander-v2
<NAME>
2021
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from stable_baselines3.common.callbacks import BaseCallback
from tqdm import tqdm
from os import listdir
from tensorflow.python.summary.summary_iterator import summary_iterator
class LogStepsCallback(BaseCallback):
def __init__(self, log_dir, verbose=0):
self.log_dir = log_dir
super(LogStepsCallback, self).__init__(verbose)
def _on_training_start(self) -> None:
self.results = pd.DataFrame(columns=['Reward', 'Done'])
print("Τraining starts!")
def _on_step(self) -> bool:
if 'reward' in self.locals:
keys = ['reward', 'done']
else:
keys = ['rewards', 'dones']
self.results.loc[len(self.results)] = [self.locals[keys[0]][0], self.locals[keys[1]][0]]
return True
def _on_training_end(self) -> None:
self.results.to_csv(self.log_dir + 'training_data.csv', index=False)
print("Τraining ends!")
class TqdmCallback(BaseCallback):
def __init__(self):
super().__init__()
self.progress_bar = None
def _on_training_start(self):
self.progress_bar = tqdm(total=self.locals['total_timesteps'])
def _on_step(self):
self.progress_bar.update(1)
return True
def _on_training_end(self):
self.progress_bar.close()
self.progress_bar = None
def save_dict_to_file(dict, path, txt_name='hyperparameter_dict'):
f = open(path + '/' + txt_name + '.txt', 'w')
f.write(str(dict))
f.close()
def calc_episode_rewards(training_data):
# Calculate the rewards for each training episode
episode_rewards = []
temp_reward_sum = 0
for step in range(training_data.shape[0]):
reward, done = training_data.iloc[step, :]
temp_reward_sum += reward
if done:
episode_rewards.append(temp_reward_sum)
temp_reward_sum = 0
result = pd.DataFrame(columns=['Reward'])
result['Reward'] = episode_rewards
return result
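# Added worked example: for step rewards [1.0, 2.0, 3.0, 0.5] with done flags [0, 0, 1, 0], only the first episode
# has finished, so calc_episode_rewards returns a single-row DataFrame with Reward = 6.0; the trailing 0.5 of the
# unfinished episode is discarded.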
def learning_curve(episode_rewards, log_dir, window=10):
# Calculate rolling window metrics
rolling_average = episode_rewards.rolling(window=window, min_periods=window).mean().dropna()
rolling_max = episode_rewards.rolling(window=window, min_periods=window).max().dropna()
rolling_min = episode_rewards.rolling(window=window, min_periods=window).min().dropna()
# Change column name
rolling_average.columns = ['Average Reward']
rolling_max.columns = ['Max Reward']
rolling_min.columns = ['Min Reward']
rolling_data = pd.concat([rolling_average, rolling_max, rolling_min], axis=1)
# Plot
sns.set()
ax = sns.lineplot(data=rolling_data)
ax.fill_between(rolling_average.index, rolling_min.iloc[:, 0], rolling_max.iloc[:, 0], alpha=0.2)
ax.set_title('Learning Curve')
ax.set_ylabel('Reward')
ax.set_xlabel('Episodes')
ax.set(ylim=(-250, 325))
# Save figure
plt.savefig(log_dir + 'learning_curve' + str(window) + '.png')
def learning_curve_baselines(log_dir, window=10):
# Read data
training_data = pd.read_csv(log_dir + 'training_data.csv', index_col=None)
# Calculate episode rewards
episode_rewards = calc_episode_rewards(training_data)
learning_curve(episode_rewards=episode_rewards, log_dir=log_dir, window=window)
def learning_curve_tianshou(log_dir, window=10):
# Find event file
files = listdir(log_dir)
for f in files:
if 'events' in f:
event_file = f
break
# Read episode rewards
episode_rewards_list = []
episode_rewards = pd.DataFrame(columns=['Reward'])
try:
for e in summary_iterator(log_dir + event_file):
if len(e.summary.value) > 0:
if e.summary.value[0].tag == 'train/reward':
episode_rewards_list.append(e.summary.value[0].simple_value)
    except Exception:
        pass
episode_rewards['Reward'] = episode_rewards_list
# Learning curve
learning_curve(episode_rewards, log_dir, window=window)
def learning_curve_tianshou_multiple_runs(log_dirs, window=10):
episode_rewards_list = []
    episode_rewards = pd.DataFrame(columns=['Reward'])
########################################################################################################################
# #
# Author: <NAME>, ETH Zürich, December 8th 2020 #
# See below for function description #
# #
# Acknowledgements: #
# Dr. <NAME> #
# Dr. <NAME> #
# <NAME> #
# #
########################################################################################################################
# Check if required modules are installed
# test if sys is installed in python
try:
import sys
except ImportError:
print('Error, module sys is required.')
exit()
import sys
# test if shutil is installed in python
try:
import shutil
except ImportError:
print('Error, module shutil is required.')
sys.exit()
# test if distutils is installed in python
try:
from distutils.dir_util import copy_tree
except ImportError:
print('Error, module distutils is required.')
sys.exit()
# test if re is installed in python
try:
import re
except ImportError:
print('Error, module re is required.')
sys.exit()
# test if os is installed in python
try:
from os import listdir
except ImportError:
print('Error, module os is required.')
sys.exit()
try:
from os.path import isfile, join, isdir
except ImportError:
print('Error, module os is required.')
sys.exit()
# test if pandas is installed in python
try:
import pandas as pd
except ImportError:
print('Error, module pandas is required.')
sys.exit()
# test if statistics is installed in python
try:
from statistics import median
except ImportError:
print('Error, module statistics is required.')
sys.exit()
# test if json is installed in python
try:
import json
except ImportError:
print('Error, module json is required.')
sys.exit()
# test if numpy is installed in python
try:
import numpy as np
except ImportError:
print('Error, module numpy is required.')
sys.exit()
# test if matplotlib is installed in python
try:
import matplotlib
except ImportError:
print('Error, module matplotlib is required.')
sys.exit()
matplotlib.use('Agg') # This is to avoid issues when running the script and producing pdfs when being connected to
# server over ssh.
# test if matplotlib is installed in python
try:
import matplotlib.pyplot as plt
except ImportError:
print('Error, module matplotlib is required.')
sys.exit()
try:
from matplotlib.patches import Polygon
except ImportError:
print('Error, module matplotlib is required.')
sys.exit()
# test if mpl_toolkits is installed in python
try:
from mpl_toolkits.axes_grid1 import Divider, Size
except ImportError:
print('Error, module mpl_toolkits is required.')
sys.exit()
try:
from mpl_toolkits.axes_grid1.mpl_axes import Axes
except ImportError:
print('Error, module mpl_toolkits is required.')
sys.exit()
# test if pathlib is installed in python
try:
import pathlib
except ImportError:
print('Error, module pathlib is required.')
sys.exit()
# test if Bio is installed in python
try:
import Bio.ExPASy.ScanProsite
except ImportError:
print('Error, module Bio is required.')
sys.exit()
# test if urllib is installed in python
try:
import urllib
except ImportError:
print('Error, module urllib is required.')
sys.exit()
# test if urllib is installed in python
try:
from urllib.error import HTTPError
except ImportError:
print('Error, module urllib is required.')
sys.exit()
# test if math is installed in python
try:
import math
except ImportError:
print('Error, module math is required.')
sys.exit()
# test if time is installed in python
try:
import time
except ImportError:
print('Error, module time is required.')
sys.exit()
# test if datetime is installed in python
try:
from datetime import date
except ImportError:
print('Error, module datetime is required.')
sys.exit()
########################################################################################################################
# #
#                                                    f_run_propplot                                                   #
# Main function of propplot.py. See help for further information #
# #
# Mandatory arguments: #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# - vinputfile [string]: Indicates the location of the input fasta file. #
# - vignoredb [string] gets converted into True/False: Indicates whether previously gained results should be #
# ignored #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vdbfolder [string]: Indicates the location of the folder where the databases of previous results should be #
# built up. #
# - vgroupfile [string]: Indicates the location of a tsv file that contains the information about associations of #
# proteins to their protein groups. First column is fasta headers second column are user #
# defined group names. #
# - vcolorfile [string]: Indicates the location of a tsv file that contains the information about domain coloring. #
# First column is the domain name, second column is a hex code for the color #
# - vignoredomainfile [string]: Indicates a file, with domains as rows. If a domain shows up in this file, it is #
# not displayed on plots. #
# - vcutoff [string] gets converted into float: Defines if a domain should be displayed for a group of proteins. #
# Only if the domain surpasses the cutoff in relative abundance, it #
# is displayed. #
# - vmaxcutoff [string] gets converted into float: Same as vcutoff, however, before the domain could exist anywhere #
# in the protein. Here the domains need to be present at the same #
# location to make the cut. #
# - vcustom_scaling_on [string] gets converted into boolean: Allows for custom scaling of the figure. The standard #
# case is that this is not on, and thus the scaling is #
# the same regardless of the size of the protein. If #
# this is on, one can set with option api (default 100 #
# amino acids per inch) the number of amino acids #
# displayed per inch to scale the width of the figure. #
# - vscalingfigure [string] gets converted into float: Indicates the number of amino acids per inch that are #
# displayed per inch of x-axis. #
# - vabsolute [string] gets converted into True/False: Indicates whether absolute numbers are displayed on the #
# y-axis. #
# - vwarnings [string] gets converted into True/False: Indicates whether warnings are written out. #
# - vfrom_scratch [0 or 1] gets converted into True/False: Indicates whether previous results should be discarded. #
# - vnotolderthan [string] gets converted into int: Indicates the date that should be used as cutoff to load data #
# from databases. If the storing date is older than the date #
# given, the data is not observed. #
# #
# Optional arguments: #
# - none #
# #
# Output: #
# - jobid_proteingroup_prosite.pdf [file]: Pdf plot for Prosite domains relative to the median length of amino acid #
# sequence of all proteins in the protein group. #
# - jobid_proteingroup_pfam.pdf [file]: Pdf plot as above, but for PFAM domains. #
# - jobid_proteingroup_combined.pdf [file]: Pdf plot for Prosite and PFAM domains. #
# - proteingroup_prosite_colors.txt [file]: Tsv of the domains and colors used in the plot. Can be modified and #
# funneled back into the script to modify colors. See vcolorfile for #
# structure. #
# - proteingroup_pfam_colors.txt [file]: Same file as above, but for pfam plot. #
# - proteingroup_combined_colors.txt [file]: Same file as above, but for combined plot. #
# - jobid_proteingroup_prosite.csv [file]: Results of Prosite for protein group. #
# - jobid_proteingroup_pfam.csv [file]: Results of PFAM for protein group. #
# - jobid_prosite_res.tsv [file]: Results of Prosite for this job. #
# - jobid_pfam_res.tsv [file]: Results of PFAM for this job. #
# - Prosite_db_[first five amino acids of sequence] [file]: TSV database files for sequences run through Prosite. #
# - Pfam_db_[first five amino acids of sequence] [file]: TSV database files for sequences run through PFAM. #
# #
########################################################################################################################
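# Added illustrative call (hypothetical argument values; all parameters are passed as strings and converted inside
# the function, as described above):
# f_run_propplot('job1', 'proteins.fasta', '0', 'results', 'dbs', '', '', '', '0.05', '0.05', '0', '1', '0', '1',
#                '0', '20200101')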
# noinspection PyTypeChecker
def f_run_propplot(vjobid, vinputfile, vignoredb, vsavefolder, vdbfolder, vgroupfile, vcolorfile, vignoredomainfile,
vcutoff, vmaxcutoff, vcustom_scaling_on, vscalingfigure, vabsolute, vwarnings, vfrom_scratch,
vnotolderthan):
# Write initial cookie
f_write_cookie(0, vsavefolder, vjobid, 'Job initialized')
# Constants:
# For plotting:
vstandardwidth = 7
vstandardheight = 4.49
vpaddingwidth = 1.0
vpaddingheight = 0.7
vfontsize = 10
vcurrentdate = int(re.sub('-', '', str(date.fromtimestamp(time.time()))))
# Write initial job:
f_write_log(vsavefolder, vjobid, 'Job initialized with following parameters\nJobid: ' + vjobid + '\nInputfile: ' +
vinputfile + '\nIgnoredb: ' + vignoredb + '\nSavefolder: ' + vsavefolder + '\nDbfolder: ' + vdbfolder +
'\nGroupfile: ' + vgroupfile + '\nColorfile: ' + vcolorfile + '\nIgnoredomainfile: ' +
vignoredomainfile + '\nCutoff: ' + vcutoff + '\nMaxcutoff: ' + vmaxcutoff + '\nCustomscaling: ' +
vcustom_scaling_on + '\nScalingfigure: ' + vscalingfigure + '\nAbsolute: ' + vabsolute +
'\nWarnings: ' + vwarnings + '\nFrom_scratch: ' + vfrom_scratch + '\nNotolderthan: ' + vnotolderthan +
'\n\n', 'w')
# Modify parameters
f_write_log(vsavefolder, vjobid, 'Modifying parameters: ', 'a')
if vignoredb == '0':
vignoredb = False
elif vignoredb == '1':
vignoredb = True
else:
vignoredb = False
vcutoff = float(vcutoff)
vmaxcutoff = float(vmaxcutoff)
if vcustom_scaling_on == '0':
vcustom_scaling_on = False
elif vcustom_scaling_on == '1':
vcustom_scaling_on = True
else:
vcustom_scaling_on = False
vscalingfigure = float(vscalingfigure)
if vabsolute == '0':
vabsolute = False
elif vabsolute == '1':
vabsolute = True
else:
vabsolute = False
if vwarnings == '0':
vwarnings = False
elif vwarnings == '1':
vwarnings = True
else:
vwarnings = False
if vfrom_scratch == '0':
vfrom_scratch = False
elif vfrom_scratch == '1':
vfrom_scratch = True
else:
vfrom_scratch = False
vnotolderthan = int(vnotolderthan)
f_success(vsavefolder, vjobid)
# Additional internal constants
f_write_log(vsavefolder, vjobid, 'Add internal constants parameters: ', 'a')
vlen_dbid = 5 # Length of db names (defines how many different db files are made.)
f_success(vsavefolder, vjobid)
# Read in headers and sequences that were used to produce the prosite and or pfam results
f_write_log(vsavefolder, vjobid, 'Reading in fasta file:\n', 'a')
vheaders, vsequences = f_read_in_file(vinputfile, vsavefolder, vjobid)
if vwarnings:
print('Headers: ' + str(len(vheaders)) + ', Sequences: ' + str(len(vsequences)))
f_write_log(vsavefolder, vjobid, 'Fasta file read in successfully.\n', 'a')
# Write first cookie
f_write_cookie(1, vsavefolder, vjobid, 'Input file successfully read')
# Get Prosite results
f_write_log(vsavefolder, vjobid, 'Gathering prosite domains:\n', 'a')
vprositefile = join(vsavefolder, vjobid + '_prosite_res.tsv')
# Check if Prosite results have been produced that can be loaded.
f_write_log(vsavefolder, vjobid, 'Check if prosite domain results file exists: ', 'a')
try:
vfh_colors = open(vprositefile, 'r')
vprosite_already_done = True
vfh_colors.close()
f_success(vsavefolder, vjobid)
except IOError:
vprosite_already_done = False
f_no(vsavefolder, vjobid)
# Check if results should be run from scratch
if vfrom_scratch:
f_write_log(vsavefolder, vjobid, 'All results for Prosite will be run from scratch (no dbs, no previous '
'saves).\n', 'a')
vprosite_already_done = False
# Getting results from Prosite
if not vprosite_already_done:
f_write_log(vsavefolder, vjobid, 'Create new Prosite output file: ', 'a')
vfh_prosite_output = open(vprositefile, 'w') # Emptying the prosite output file.
vfh_prosite_output.close()
f_success(vsavefolder, vjobid)
# Defining storing places of dbs
if vdbfolder == '':
vdbfiles = 'Prosite_db'
else:
vdbfiles = join(vdbfolder, 'Prosite_db')
f_write_log(vsavefolder, vjobid, 'Prosite DB files stored at: ' + vdbfiles + '\n', 'a')
# Process every header
for vheader_id, vheader in enumerate(vheaders):
if vwarnings:
print('Searching header ' + str(vheader_id + 1) + ' of ' + str(len(vheaders)) + '.')
f_write_log(vsavefolder, vjobid, 'Processing header: ' + vheader + '\n', 'a')
vfound = False # Defines if the header was found in the db
vdbid = vsequences[vheader_id][0:vlen_dbid] # Defines the db file that should be searched.
if not vignoredb: # If the db should be searched.
f_write_log(vsavefolder, vjobid, 'Searching in Prosite DB ' + vdbid + ': ', 'a')
try: # Check if db exists.
vfh_db_prosite = open(vdbfiles + '_' + vdbid, 'r') # Open db.
f_write_log(vsavefolder, vjobid, 'exists\n', 'a')
for ventry in vfh_db_prosite: # Go through all entries of db.
ventry = ventry.rstrip('\n') # Get rid of new line character at the end.
vsplitentry = ventry.split('\t') # Split record into its attributes.
if int(vsplitentry[0]) >= vnotolderthan: # Check if the entry is young enough.
if vsequences[vheader_id] == vsplitentry[1]: # check if first attribute is sequence of
# interest.
vfound = True # if it is, say that the sequence was found in the db.
f_write_log(vsavefolder, vjobid, 'Found entry in DB ' + vdbid + '\n', 'a')
f_write_pfam_prosite_res(vprositefile, vheader, vsplitentry[1:], False, True,
vsavefolder, vjobid) # Write the record to the
# output file.
vfh_db_prosite.close()
except IOError: # If the db can not be read, it likely does not exist.
f_write_log(vsavefolder, vjobid, 'does not yet exist\n', 'a')
if vwarnings:
print(vdbfiles + '_' + vdbid + ' does not yet exist.')
if not vfound: # If the sequence has not been found in a db.
f_write_log(vsavefolder, vjobid, 'Sending sequence to Prosite\n', 'a')
ventry_found = f_run_sequences_through_prosite(vheader, vsequences[vheader_id], vsavefolder, vjobid,
vwarnings)
# Search for the sequence in prosite.
f_write_pfam_prosite_db(vdbfiles, vdbid, ventry_found, vsavefolder, vjobid, vcurrentdate) # Write
# the results into the db.
f_write_pfam_prosite_res(vprositefile, vheader, ventry_found, False, False, vsavefolder, vjobid)
# Write the results into the results file.
# Update cookies
vcid = math.floor((vheader_id + 1) / len(vheaders) * 100 / 5) + 1
if vcid > 1:
f_write_cookie(vcid, vsavefolder, vjobid, '')
vprositedata = f_read_tsv(vprositefile, vsavefolder, vjobid) # Read the data of prosite from the result file.
f_write_log(vsavefolder, vjobid, 'Successfully gathered Prosite domain results.\n', 'a')
f_write_cookie(22, vsavefolder, vjobid, 'Finished searching Prosite')
# Get PFAM results
f_write_log(vsavefolder, vjobid, 'Gathering PFAM domains:\n', 'a')
vpfamfile = join(vsavefolder, vjobid + '_pfam_res.tsv')
# Check if PFAM results have been produced that can be loaded.
f_write_log(vsavefolder, vjobid, 'Check if PFAM domain results file exists: ', 'a')
try:
vfh_colors = open(vpfamfile, 'r')
vpfam_already_done = True
vfh_colors.close()
f_success(vsavefolder, vjobid)
except IOError:
vpfam_already_done = False
f_no(vsavefolder, vjobid)
# Check if results should be run from scratch
if vfrom_scratch:
f_write_log(vsavefolder, vjobid, 'All results for PFAM will be run from scratch (no dbs, no previous '
'saves).\n', 'a')
vpfam_already_done = False
# Getting results from PFAM (see above Prosite for reference. Only commenting when different.)
if not vpfam_already_done:
f_write_log(vsavefolder, vjobid, 'Create new PFAM output file: ', 'a')
vfh_pfam_output = open(vpfamfile, 'w') # writing the header of the pfam result file.
# vfh_pfam_output.write('Sequence id\tFamily id\tFamily Accession\tClan\tEnv. Start\tEnv. End\tAli. Start\t'
# 'Ali. End\tModel Start\tModel End\tBit Score\tInd. E-value\tCond. E-value\tDescription\t'
# 'aliIdCount\taliL\taliM\taliN\taliSim\taliSimCount\taliaseq\talicsline\talihindex\t'
# 'alimline\talimmline\talimodel\talintseq\talippline\talirfline\talisqacc\talisqdesc\t'
# 'alisqfrom\talisqname\talisqto\tbias\tdisplay\tis_included\tis_reported\toasc\t'
# 'outcompeted\tsignificant\tuniq\n')
vfh_pfam_output.write('Sequence id\tFamily id\tFamily Accession\tClan\tEnv. Start\tEnv. End\tAli. Start\t'
'Ali. End\tModel Start\tModel End\tBit Score\tInd. E-value\tCond. E-value\tDescription\t'
'outcompeted\tsignificant\tuniq\n')
vfh_pfam_output.close()
# Defining storing places of dbs
if vdbfolder == '':
vdbfiles = 'PFAM_db'
else:
vdbfiles = join(vdbfolder, 'PFAM_db')
f_write_log(vsavefolder, vjobid, 'PFAM DB files stored at: ' + vdbfiles + '\n', 'a')
# Process every header
for vheader_id, vheader in enumerate(vheaders):
if vwarnings:
print('Searching header ' + str(vheader_id + 1) + ' of ' + str(len(vheaders)) + '.')
f_write_log(vsavefolder, vjobid, 'Processing header: ' + vheader + '\n', 'a')
vfound = False
vdbid = vsequences[vheader_id][0:vlen_dbid]
if not vignoredb:
f_write_log(vsavefolder, vjobid, 'Searching in PFAM DB ' + vdbid + ': ', 'a')
try:
vfh_db_pfam = open(vdbfiles + '_' + vdbid, 'r')
f_write_log(vsavefolder, vjobid, 'exists\n', 'a')
for ventry in vfh_db_pfam:
ventry = ventry.rstrip('\n')
vsplitentry = ventry.split('\t')
if int(vsplitentry[0]) >= vnotolderthan: # Check if the entry is young enough.
if vsequences[vheader_id] == vsplitentry[1]: # check if first attribute is sequence of
# interest.
vfound = True
f_write_log(vsavefolder, vjobid, 'Found entry in DB ' + vdbid + '\n', 'a')
if vsplitentry[len(vsplitentry) - 1 - 1] == '1': # Checks if result was significant.
                                # and vsplitentry[len(vsplitentry) - 1 - 2] == '0': # This condition would keep only
                                # non-outcompeted domains. Decided to display both outcompeted and non-outcompeted
                                # domains, but not non-significant ones.
f_write_pfam_prosite_res(vpfamfile, vheader, vsplitentry[1:], True, True,
vsavefolder, vjobid)
vfh_db_pfam.close()
except IOError:
f_write_log(vsavefolder, vjobid, 'does not yet exist\n', 'a')
if vwarnings:
print(vdbfiles + '_' + vdbid + ' does not yet exist.')
if not vfound:
f_write_log(vsavefolder, vjobid, 'Sending sequence to PFAM\n', 'a')
ventry_found = f_run_sequences_through_pfam(vheader, vsequences[vheader_id], vsavefolder, vjobid,
vwarnings)
ventry_screened = []
for ventry in ventry_found:
if ventry[len(ventry) - 1 - 1] == '1': # (see above)
# and ventry[len(ventry)-1-2] == '0': # (see above)
ventry_screened.append(ventry)
f_write_pfam_prosite_db(vdbfiles, vdbid, ventry_found, vsavefolder, vjobid, vcurrentdate)
f_write_pfam_prosite_res(vpfamfile, vheader, ventry_screened, True, False, vsavefolder, vjobid) # Only
# write significant results into the result file.
# Update cookies
vcid = math.floor((vheader_id + 1) / len(vheaders) * 100 / 5) + 22
if vcid > 22:
f_write_cookie(vcid, vsavefolder, vjobid, '')
vpfamdata = f_read_tsv(vpfamfile, vsavefolder, vjobid)
f_write_log(vsavefolder, vjobid, 'Successfully gathered PFAM domain results.\n', 'a')
f_write_cookie(43, vsavefolder, vjobid, 'Finished searching PFAM')
# Gather protein groups of protein data
if vgroupfile == '': # Default case, if no group association file is handed over.
f_write_log(vsavefolder, vjobid, 'No custom protein group file used\n', 'a')
vgroup = []
vgroup_u = []
for vheader in vheaders:
f_write_log(vsavefolder, vjobid, 'Appending ' + vheader + ' to protein group "ProteinGroup".\n', 'a')
if vwarnings:
print('Appending ' + vheader + ' to protein group "ProteinGroup".')
vgroup.append('ProteinGroup')
vgroup_u.append('ProteinGroup')
else:
f_write_log(vsavefolder, vjobid, 'Reading in protein group file:\n', 'a')
vgroup, vgroup_u = f_read_in_groupfile(vgroupfile, vheaders, vsavefolder, vjobid)
vgroup_u = list(set(vgroup_u))
f_write_log(vsavefolder, vjobid, 'Read protein group file successfully\n', 'a')
# Per group of protein: get median sequence length
f_write_log(vsavefolder, vjobid, 'Calculating median length of proteins for each protein group: ', 'a')
vmedlengroup = []
for vgitem in vgroup_u:
vlenproteins = []
for vg, vitem in enumerate(vgroup):
if vitem == vgitem:
vlenproteins.append(len(vsequences[vg]))
vmedlengroup.append(math.ceil(median(vlenproteins)))
f_success(vsavefolder, vjobid)
# Get specific coloring
if vcolorfile != '':
f_write_log(vsavefolder, vjobid, 'Reading in color file:\n', 'a')
vcolor_domain, vcolor_hexcode = f_read_in_colorfile(vcolorfile, vsavefolder, vjobid)
f_write_log(vsavefolder, vjobid, 'Read color file successfully\n', 'a')
else:
f_write_log(vsavefolder, vjobid, 'No custom color file used\n', 'a')
vcolor_domain = []
vcolor_hexcode = []
# Get list of domains to ignore
if vignoredomainfile != '':
f_write_log(vsavefolder, vjobid, 'Reading in ignore domain file:\n', 'a')
vignore_domain = f_read_in_ignoredomainfile(vignoredomainfile, vsavefolder, vjobid)
f_write_log(vsavefolder, vjobid, 'Read ignore domain file successfully\n', 'a')
else:
f_write_log(vsavefolder, vjobid, 'No custom ignore domain file used\n', 'a')
vignore_domain = []
# Get unique list of Prosite domains
f_write_log(vsavefolder, vjobid, 'Produce unique list of Prosite domains: ', 'a')
vprositedomains_u = []
vprositedomains_u_color = []
vprositedomains_u_ignore = []
if vprositefile != '':
for vprdc in range(len(vprositedata.columns)):
vprositedomains_u.append(vprositedata[vprdc][3])
vprositedomains_u = list(set(vprositedomains_u))
for vitem in vprositedomains_u:
vfound = 0
for vc, vcitem in enumerate(vcolor_domain):
if vitem == vcitem:
if vfound == 0:
vprositedomains_u_color.append(vcolor_hexcode[vc])
vfound = 1
if vfound == 0:
vprositedomains_u_color.append('')
vfound = 0
for viitem in vignore_domain:
if vitem == viitem:
if vfound == 0:
vprositedomains_u_ignore.append(1)
vfound = 1
if vfound == 0:
vprositedomains_u_ignore.append(0)
f_success(vsavefolder, vjobid)
# Get unique list of Pfam domains
f_write_log(vsavefolder, vjobid, 'Produce unique list of PFAM domains: ', 'a')
vpfamdomains_u = []
vpfamdomains_u_color = []
vpfamdomains_u_ignore = []
if vpfamfile != '':
for vpdc in range(1, len(vpfamdata.columns)):
vpfamdomains_u.append(vpfamdata[vpdc][1])
vpfamdomains_u = list(set(vpfamdomains_u))
for vitem in vpfamdomains_u:
vfound = 0
for vc, vcitem in enumerate(vcolor_domain):
if vitem == vcitem:
if vfound == 0:
vpfamdomains_u_color.append(vcolor_hexcode[vc])
vfound = 1
if vfound == 0:
vpfamdomains_u_color.append('')
vfound = 0
for viitem in vignore_domain:
if vitem == viitem:
if vfound == 0:
vpfamdomains_u_ignore.append(1)
vfound = 1
if vfound == 0:
vpfamdomains_u_ignore.append(0)
f_success(vsavefolder, vjobid)
# Remove beginning ('>') from fasta
f_write_log(vsavefolder, vjobid, 'Processing fasta headers\n', 'a')
vheaders_no_fastastart = []
for vitem in vheaders:
vheaders_no_fastastart.append(vitem.lstrip('>'))
f_write_cookie(60, vsavefolder, vjobid, 'Finished reading and processing additional data')
# Per group of protein: get annotations of Prosite and PFAM domains and make one plot per protein group.
f_write_log(vsavefolder, vjobid, 'Collecting Prosite and PFAM domain information per protein group\n', 'a')
vprositedomainsofgroup_all = []
vprositedomainsingenes_all = []
vpfamdomainsofgroup_all = []
vpfamdomainsingenes_all = []
vsize_of_prosite_data_all = []
vsize_of_pfam_data_all = []
vprositedomainsofgroup_rel_all = []
vpfamdomainsofgroup_rel_all = []
vn_prot_per_group_all = []
vprositedomainsofgroup = np.zeros((0, 0), dtype=float)
vprositedomainsingenes = np.zeros((0, 0), dtype=float)
vpfamdomainsofgroup = np.zeros((0, 0), dtype=float)
vpfamdomainsingenes = np.zeros((0, 0), dtype=float)
# Define domain colors
if vsavefolder != '':
vdomaincolorfile = join(vsavefolder, vjobid + '_domain_color_file.txt')
else:
vdomaincolorfile = vjobid + '_domain_color_file.txt'
f_write_log(vsavefolder, vjobid, 'Storing domain color information in ' + vdomaincolorfile + '\n', 'a')
vfh_colors = open(vdomaincolorfile, 'w')
# Prepare switches for each domain to decide if they should be processed
vprositedomains_u_process = []
for _ in vprositedomains_u:
vprositedomains_u_process.append(True)
vpfamdomains_u_process = []
for _ in vpfamdomains_u:
vpfamdomains_u_process.append(True)
# Prepare dummy figure that contains all domains of all groups to get consistent coloring
f_write_log(vsavefolder, vjobid, 'Prepare dummy figure of all domains and proteins: ', 'a')
if not vcustom_scaling_on:
vscalingfigure = vscalingfigure * 500 / max(vmedlengroup)
else:
vstandardwidth = (vstandardwidth - 2 * vpaddingwidth) * vscalingfigure * max(vmedlengroup) / 500 + \
(2 * vpaddingwidth)
fig = plt.figure(figsize=[vstandardwidth, vstandardheight])
h = [Size.Fixed(vpaddingwidth),
Size.Fixed(vscalingfigure * max(vmedlengroup) / 100)]
v = [Size.Fixed(vpaddingheight), Size.Fixed(vstandardheight - 2 * vpaddingheight)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for vug, vgitem in enumerate(vgroup_u): # Go through all groups of proteins
f_write_log(vsavefolder, vjobid, 'Processing protein group ' + vgitem + '\n', 'a')
vn_prot_per_group = 0 # Initialize the counting of sequences per group
if vprositefile != '':
vprositedomainsofgroup = np.zeros((len(vprositedomains_u), vmedlengroup[vug]), dtype=float) # Initialize
# with number of unique prosite domains and median length of proteins
vprositedomainsingenes = np.zeros((len(vprositedomains_u), len(vheaders)), dtype=int) # Stores if a domain
# has been found for a gene
if vpfamfile != '':
vpfamdomainsofgroup = np.zeros((len(vpfamdomains_u), vmedlengroup[vug]), dtype=float) # Initialize with
# number of unique prosite domains and median length of proteins
vpfamdomainsingenes = np.zeros((len(vpfamdomains_u), len(vheaders)), dtype=int) # Stores if a domain has
# been found for a gene
vseqaddedfromprosite = 0
vseqaddedfrompfam = 0
for vh, vitem in enumerate(vheaders_no_fastastart): # Go through all sequences
if vgroup[vh] == vgitem: # If the sequence has the same group association as is currently searched
vn_prot_per_group = vn_prot_per_group + 1 # Count the number of sequence per this group up by one
if vprositefile != '':
# Process Prosite
for vheader_id in range(len(vprositedata.columns)): # go through all prosite data
if vprositedata[vheader_id][0] == vitem: # If the prosite data is about the sequence we
# currently look at
for vp, vpd in enumerate(vprositedomains_u): # Go through all domains
if vprositedata[vheader_id][3] == vpd: # Check if the domain is the domain we currently
# look at
vmedianstartbp = f_float2int((int(vprositedata[vheader_id][1]) - 1) /
len(vsequences[vh]) * vmedlengroup[vug]) # Calculate
# the start position relative to the median length of the protein group:
vmedianendbp = f_float2int((int(vprositedata[vheader_id][2]) - 1) /
len(vsequences[vh]) * vmedlengroup[vug]) # Calculate the
# end position relative to the median length of the protein group:
vprositedomainsofgroup[vp, vmedianstartbp:vmedianendbp] += 1 # Mark the placing of
# the domain
vprositedomainsingenes[vp, vh] = 1 # Define that the domain has been found for that
# gene
vseqaddedfromprosite += 1
if vpfamfile != '':
# Process PFAM
for vheader_id in range(len(vpfamdata.columns)): # Go through all pfam data
if vpfamdata[vheader_id][0].lstrip('>') == vitem: # If the pfam data is about the sequence we
# currently look at
for vp, vpd in enumerate(vpfamdomains_u): # Go through all domains
if vpfamdata[vheader_id][1] == vpd: # If the domain is the domain we currently look at
vmedianstartbp = f_float2int((int(vpfamdata[vheader_id][6]) - 1) /
len(vsequences[vh]) * vmedlengroup[vug]) # Calculate
# the start position relative to the median length of the protein group:
vmedianendbp = f_float2int((int(vpfamdata[vheader_id][7]) - 1) /
len(vsequences[vh]) * vmedlengroup[vug]) # Calculate the
# end position relative to the median length of the protein group:
vpfamdomainsofgroup[vp, vmedianstartbp:vmedianendbp] += 1 # Mark the placing of the
# domain
vpfamdomainsingenes[vp, vh] = 1 # Define that the domain has been found for
# that gene
vseqaddedfrompfam += 1
f_write_log(vsavefolder, vjobid, 'Added domain information from ' + str(vseqaddedfromprosite) + ' sequences for'
' Prosite.\nAdded domain information from ' + str(vseqaddedfrompfam) +
' sequences for PFAM.\n', 'a')
vprositedomainsingenes_all.append(vprositedomainsingenes)
vpfamdomainsingenes_all.append(vpfamdomainsingenes)
vsize_of_prosite_data = [0, 0]
vsize_of_pfam_data = [0, 0]
if vprositefile != '':
vsize_of_prosite_data = vprositedomainsofgroup.shape
if vpfamfile != '':
vsize_of_pfam_data = vpfamdomainsofgroup.shape
vsize_of_prosite_data_all.append(vsize_of_prosite_data)
vsize_of_pfam_data_all.append(vsize_of_pfam_data)
# Normalization of data
f_write_log(vsavefolder, vjobid, 'Compute relative prevalence of domains per group: ', 'a')
vprositedomainsofgroup_rel = np.zeros((0, 0), dtype=float)
vpfamdomainsofgroup_rel = np.zeros((0, 0), dtype=float)
if vprositefile != '':
vprositedomainsofgroup_rel = np.zeros((len(vprositedomains_u), vmedlengroup[vug]), dtype=float)
for vprd in range(vsize_of_prosite_data[0]):
for vj in range(vsize_of_prosite_data[1]):
if vprositedomainsofgroup[vprd][vj] != 0:
vprositedomainsofgroup_rel[vprd][vj] = vprositedomainsofgroup[vprd][vj]/vn_prot_per_group * 100
if vpfamfile != '':
vpfamdomainsofgroup_rel = np.zeros((len(vpfamdomains_u), vmedlengroup[vug]), dtype=float)
for vpfd in range(vsize_of_pfam_data[0]):
for vj in range(vsize_of_pfam_data[1]):
if vpfamdomainsofgroup[vpfd][vj] != 0:
vpfamdomainsofgroup_rel[vpfd][vj] = vpfamdomainsofgroup[vpfd][vj]/vn_prot_per_group * 100
vprositedomainsofgroup_rel_all.append(vprositedomainsofgroup_rel)
vpfamdomainsofgroup_rel_all.append(vpfamdomainsofgroup_rel)
vn_prot_per_group_all.append(vn_prot_per_group)
f_success(vsavefolder, vjobid)
# If not using absolute values
if not vabsolute:
f_write_log(vsavefolder, vjobid, 'Use relative prevalence of domains per group.\n', 'a')
if vprositefile != '':
vprositedomainsofgroup = vprositedomainsofgroup_rel
if vpfamfile != '':
vpfamdomainsofgroup = vpfamdomainsofgroup_rel
else:
f_write_log(vsavefolder, vjobid, 'Use absolute prevalence of domains per group.\n', 'a')
vprositedomainsofgroup_all.append(vprositedomainsofgroup)
vpfamdomainsofgroup_all.append(vpfamdomainsofgroup)
# Save of data
f_write_log(vsavefolder, vjobid, 'Save domain data of group as .csvs: ', 'a')
if vprositefile != '':
if vsavefolder != '':
np.savetxt(join(vsavefolder, vjobid + '_' + vgitem + '_prosite.csv'), vprositedomainsofgroup,
delimiter="\t")
else:
np.savetxt(vjobid + '_' + vgitem + '_prosite.csv', vprositedomainsofgroup, delimiter="\t")
if vpfamfile != '':
if vsavefolder != '':
np.savetxt(join(vsavefolder, vjobid + '_' + vgitem + '_pfam.csv'), vpfamdomainsofgroup, delimiter="\t")
else:
np.savetxt(vjobid + '_' + vgitem + '_pfam.csv', vpfamdomainsofgroup, delimiter="\t")
f_success(vsavefolder, vjobid)
# Plot domains into figure with all domains of all groups to get coloring identical
        f_write_log(vsavefolder, vjobid, 'Add domains into dummy figure to get consistent coloring across protein'
                                         ' groups: ', 'a')
vmaxes = []
for vprd in range(vsize_of_prosite_data[0]):
vmaxes.append(max(vprositedomainsofgroup_rel[vprd]))
for vpfd in range(vsize_of_pfam_data[0]):
vmaxes.append(max(vpfamdomainsofgroup_rel[vpfd]))
vmaxids = np.argsort(vmaxes)[::-1]
bin_edges = np.arange(vsize_of_prosite_data[1] + 1)
for vmaxi in vmaxids:
if vmaxi < vsize_of_prosite_data[0]:
if max(vprositedomainsofgroup[vmaxi]) > (vmaxcutoff * 100) and float(
sum(vprositedomainsingenes[vmaxi])) / float(vn_prot_per_group) > vcutoff and \
vprositedomains_u_ignore[vmaxi] == 0 and vprositedomains_u_process[vmaxi]:
if vprositedomains_u_color[vmaxi] == '':
vcurrbar = ax.bar(bin_edges[:-1], vprositedomainsofgroup[vmaxi], width=1, alpha=0.7,
label=vprositedomains_u[vmaxi])
else:
vcurrbar = ax.bar(bin_edges[:-1], vprositedomainsofgroup[vmaxi], width=1, alpha=0.7,
label=vprositedomains_u[vmaxi],
color=vprositedomains_u_color[vmaxi])
vcurrcolor = f_get_hex(vcurrbar.patches[0].get_facecolor())
vprositedomains_u_color[vmaxi] = vcurrcolor
vfh_colors.write(vprositedomains_u[vmaxi] + '\t' + vcurrcolor + '\n')
vprositedomains_u_process[vmaxi] = False
else:
vimod = vmaxi - vsize_of_prosite_data[0]
if max(vpfamdomainsofgroup[vimod]) > (vmaxcutoff * 100) and float(
sum(vpfamdomainsingenes[vimod])) / float(vn_prot_per_group) > vcutoff and \
vpfamdomains_u_ignore[vimod] == 0 and vpfamdomains_u_process[vimod]:
if vpfamdomains_u_color[vimod] == '':
vcurrbar = ax.bar(bin_edges[:-1], vpfamdomainsofgroup[vimod], width=1, alpha=0.7,
label=vpfamdomains_u[vimod])
else:
vcurrbar = ax.bar(bin_edges[:-1], vpfamdomainsofgroup[vimod], width=1, alpha=0.7,
label=vpfamdomains_u[vimod], color=vpfamdomains_u_color[vimod])
vcurrcolor = f_get_hex(vcurrbar.patches[0].get_facecolor())
vpfamdomains_u_color[vimod] = vcurrcolor
vfh_colors.write(vpfamdomains_u[vimod] + '\t' + vcurrcolor + '\n')
vpfamdomains_u_process[vimod] = False
vfh_colors.close()
# Write fifth cookie
f_write_cookie(80, vsavefolder, vjobid, 'Finished collecting and formatting all data')
for vug, vgitem in enumerate(vgroup_u): # Go through all groups of proteins
vprositedomainsofgroup = vprositedomainsofgroup_all[vug]
vpfamdomainsofgroup = vpfamdomainsofgroup_all[vug]
vn_prot_per_group = vn_prot_per_group_all[vug]
vprositedomainsofgroup_rel = vprositedomainsofgroup_rel_all[vug]
vpfamdomainsofgroup_rel = vpfamdomainsofgroup_rel_all[vug]
vprositedomainsingenes = vprositedomainsingenes_all[vug]
vpfamdomainsingenes = vpfamdomainsingenes_all[vug]
vsize_of_prosite_data = vsize_of_prosite_data_all[vug]
vsize_of_pfam_data = vsize_of_pfam_data_all[vug]
# Plot data of Prosite
f_write_log(vsavefolder, vjobid, 'Plot Prosite domains of protein group ' + vgitem + '.\n', 'a')
if vprositefile != '':
print('Plot data of Prosite for ' + vgitem)
fig = plt.figure(figsize=[vstandardwidth, vstandardheight])
h = [Size.Fixed(vpaddingwidth), Size.Fixed(vscalingfigure * max(vmedlengroup) / 100)]
v = [Size.Fixed(vpaddingheight), Size.Fixed(vstandardheight - 2 * vpaddingheight)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
vmaxes = []
for vprd in range(vsize_of_prosite_data[0]):
vmaxes.append(max(vprositedomainsofgroup_rel[vprd]))
vmaxids = np.argsort(vmaxes)[::-1]
bin_edges = np.arange(vsize_of_prosite_data[1] + 1)
for vheader_id in vmaxids:
if max(vprositedomainsofgroup[vheader_id]) > (vmaxcutoff * 100) and \
float(sum(vprositedomainsingenes[vheader_id]))/float(vn_prot_per_group) > vcutoff and \
vprositedomains_u_ignore[vheader_id] == 0:
# Create function
a = min(bin_edges[:-1]) # integral lower limit
b = max(bin_edges[:-1]) # integral upper limit
x_temp = [0]
for vx_i in range(1, b + 1):
x_temp.append(vx_i)
x = np.array(x_temp)
y = vprositedomainsofgroup[vheader_id] # Function value
# Create Polygon
ix = np.array(x_temp)
iy = vprositedomainsofgroup[vheader_id]
verts = [(a, 0), *zip(ix, iy), (b, 0)]
if vprositedomains_u_color[vheader_id] == '':
poly = Polygon(verts, alpha=0.7, label=vprositedomains_u[vheader_id])
else:
poly = Polygon(verts, facecolor=vprositedomains_u_color[vheader_id], alpha=0.7,
label=vprositedomains_u[vheader_id])
# Plot function
ax.plot(x, y, 'r', linewidth=0)
ax.set_ylim(bottom=0)
# Plot Polygon
ax.add_patch(poly)
plt.xlim(min(bin_edges), max(bin_edges))
if not vabsolute:
plt.ylim(0, 100)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Median length: ' + str(vmedlengroup[vug]) + ' bp', fontsize=vfontsize)
if vabsolute:
plt.ylabel('# of occurrences', fontsize=vfontsize)
else:
plt.ylabel('Percent occurrence (n = ' + str(vn_prot_per_group) + ')', fontsize=vfontsize)
plt.xticks(fontsize=vfontsize)
plt.yticks(fontsize=vfontsize)
plt.title('Distribution of Prosite protein domains\nfor ' + vgitem, fontsize=vfontsize)
plt.legend(loc='upper left', frameon=False)
if vsavefolder != '':
plt.savefig(join(vsavefolder, vjobid + '_' + vgitem + '_prosite.pdf'))
else:
plt.savefig(vjobid + '_' + vgitem + '_prosite.pdf')
# Plot data of PFAM
f_write_log(vsavefolder, vjobid, 'Plot PFAM domains of protein group ' + vgitem + '.\n', 'a')
if vpfamfile != '':
print('Plot data of PFAM for ' + vgitem)
fig = plt.figure(figsize=[vstandardwidth, vstandardheight])
h = [Size.Fixed(vpaddingwidth),
Size.Fixed(vscalingfigure * max(vmedlengroup) / 100)]
v = [Size.Fixed(vpaddingheight), Size.Fixed(vstandardheight - 2 * vpaddingheight)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
vmaxes = []
for vheader_id in range(vsize_of_pfam_data[0]):
vmaxes.append(max(vpfamdomainsofgroup_rel[vheader_id]))
vmaxids = np.argsort(vmaxes)[::-1]
bin_edges = np.arange(vsize_of_pfam_data[1] + 1)
for vheader_id in vmaxids:
if max(vpfamdomainsofgroup[vheader_id]) > (vmaxcutoff * 100) and \
float(sum(vpfamdomainsingenes[vheader_id])) / \
float(vn_prot_per_group) > vcutoff and vpfamdomains_u_ignore[vheader_id] == 0:
# Create function
a = min(bin_edges[:-1]) # integral lower limit
b = max(bin_edges[:-1]) # integral upper limit
x_temp = [0]
for vx_i in range(1, b + 1):
x_temp.append(vx_i)
x = np.array(x_temp)
y = vpfamdomainsofgroup[vheader_id] # Function value
# Create Polygon
ix = np.array(x_temp)
iy = vpfamdomainsofgroup[vheader_id]
verts = [(a, 0), *zip(ix, iy), (b, 0)]
if vpfamdomains_u_color[vheader_id] == '':
poly = Polygon(verts, alpha=0.7, label=vpfamdomains_u[vheader_id])
else:
poly = Polygon(verts, facecolor=vpfamdomains_u_color[vheader_id], alpha=0.7,
label=vpfamdomains_u[vheader_id])
# Plot function
ax.plot(x, y, 'r', linewidth=0)
ax.set_ylim(bottom=0)
# Plot Polygon
ax.add_patch(poly)
plt.xlim(min(bin_edges), max(bin_edges))
if not vabsolute:
plt.ylim(0, 100)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Median length: ' + str(vmedlengroup[vug]) + ' bp', fontsize=vfontsize)
if vabsolute:
plt.ylabel('# of occurrences', fontsize=vfontsize)
else:
plt.ylabel('Percent occurrence (n = ' + str(vn_prot_per_group) + ')', fontsize=vfontsize)
plt.xticks(fontsize=vfontsize)
plt.yticks(fontsize=vfontsize)
plt.title('Distribution of PFAM protein domains\nfor ' + vgitem, fontsize=vfontsize)
plt.legend(loc='upper left', frameon=False)
if vsavefolder != '':
plt.savefig(join(vsavefolder, vjobid + '_' + vgitem + '_pfam.pdf'))
else:
plt.savefig(vjobid + '_' + vgitem + '_pfam.pdf')
# Combined plot
f_write_log(vsavefolder, vjobid, 'Plot all domains of protein group ' + vgitem + '.\n', 'a')
if vprositefile != '' and vpfamfile != '':
print('Plot data of Prosite and PFAM for ' + vgitem)
fig = plt.figure(figsize=[vstandardwidth, vstandardheight])
h = [Size.Fixed(vpaddingwidth),
Size.Fixed(vscalingfigure * max(vmedlengroup) / 100)]
v = [Size.Fixed(vpaddingheight), Size.Fixed(vstandardheight - 2 * vpaddingheight)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
vmaxes = []
for vprd in range(vsize_of_prosite_data[0]):
vmaxes.append(max(vprositedomainsofgroup_rel[vprd]))
for vpfd in range(vsize_of_pfam_data[0]):
vmaxes.append(max(vpfamdomainsofgroup_rel[vpfd]))
vmaxids = np.argsort(vmaxes)[::-1]
bin_edges = np.arange(vsize_of_prosite_data[1] + 1)
for vmaxi in vmaxids:
if vmaxi < vsize_of_prosite_data[0]:
if max(vprositedomainsofgroup[vmaxi]) > (vmaxcutoff * 100) and float(
sum(vprositedomainsingenes[vmaxi])) / float(vn_prot_per_group) > vcutoff and \
vprositedomains_u_ignore[vmaxi] == 0:
# Create function
a = min(bin_edges[:-1]) # integral lower limit
b = max(bin_edges[:-1]) # integral upper limit
x_temp = [0]
for vx_i in range(1, b + 1):
x_temp.append(vx_i)
x = np.array(x_temp)
y = vprositedomainsofgroup[vmaxi] # Function value
# Create Polygon
ix = np.array(x_temp)
iy = vprositedomainsofgroup[vmaxi]
verts = [(a, 0), *zip(ix, iy), (b, 0)]
if vprositedomains_u_color[vmaxi] == '':
poly = Polygon(verts, alpha=0.7, label=vprositedomains_u[vmaxi])
else:
poly = Polygon(verts, facecolor=vprositedomains_u_color[vmaxi], alpha=0.7,
label=vprositedomains_u[vmaxi])
# Plot function
ax.plot(x, y, 'r', linewidth=0)
ax.set_ylim(bottom=0)
# Plot Polygon
ax.add_patch(poly)
else:
vimod = vmaxi - vsize_of_prosite_data[0]
if max(vpfamdomainsofgroup[vimod]) > (vmaxcutoff * 100) and float(
sum(vpfamdomainsingenes[vimod])) / float(vn_prot_per_group) > vcutoff and \
vpfamdomains_u_ignore[vimod] == 0:
# Create function
a = min(bin_edges[:-1]) # integral lower limit
b = max(bin_edges[:-1]) # integral upper limit
x_temp = [0]
for vx_i in range(1, b + 1):
x_temp.append(vx_i)
x = np.array(x_temp)
y = vpfamdomainsofgroup[vimod] # Function value
# Create Polygon
ix = np.array(x_temp)
iy = vpfamdomainsofgroup[vimod]
verts = [(a, 0), *zip(ix, iy), (b, 0)]
if vpfamdomains_u_color[vimod] == '':
poly = Polygon(verts, alpha=0.7, label=vpfamdomains_u[vimod])
else:
poly = Polygon(verts, facecolor=vpfamdomains_u_color[vimod], alpha=0.7,
label=vpfamdomains_u[vimod])
# Plot function
ax.plot(x, y, 'r', linewidth=0)
ax.set_ylim(bottom=0)
# Plot Polygon
ax.add_patch(poly)
plt.xlim(min(bin_edges), max(bin_edges))
if not vabsolute:
plt.ylim(0, 100)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Median length: ' + str(vmedlengroup[vug]) + ' bp', fontsize=vfontsize)
if vabsolute:
plt.ylabel('# of occurrences', fontsize=vfontsize)
else:
plt.ylabel('Percent occurrence (n = ' + str(vn_prot_per_group) + ')', fontsize=vfontsize)
plt.xticks(fontsize=vfontsize)
plt.yticks(fontsize=vfontsize)
plt.title('Distribution of protein domains\nfor ' + vgitem, fontsize=vfontsize)
plt.legend(loc='upper left', frameon=False)
if vsavefolder != '':
plt.savefig(join(vsavefolder, vjobid + '_' + vgitem + '_combined.pdf'))
else:
plt.savefig(vjobid + '_' + vgitem + '_combined.pdf')
    # Write final cookie
    f_write_cookie(100, vsavefolder, vjobid, 'Job ' + vjobid + ' successful')
f_write_log(vsavefolder, vjobid, 'Job ' + vjobid + ' ran successfully.\n', 'a')
print('done')
########################################################################################################################
# #
# f_float2int #
# Rounds a float to the nearest integer; halves (.5) are rounded up.                                                  #
# #
# Mandatory arguments: #
# - vfloat [float]: The number that should be rounded to an integer #
# #
# Output: #
# - [int]: The input value rounded to the nearest integer.                                                            #
# #
########################################################################################################################
def f_float2int(vfloat):
if vfloat - math.floor(vfloat) >= 0.5:
return int(math.ceil(vfloat))
else:
return int(math.floor(vfloat))
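# Illustrative check (not part of the pipeline): unlike Python's built-in round(), which uses
# banker's rounding, f_float2int always rounds halves up, e.g.
#   f_float2int(2.5) -> 3   while round(2.5) -> 2
#   f_float2int(2.4) -> 2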
########################################################################################################################
# #
# f_success #
# Writes success.\n into log file. #
# #
# Mandatory arguments: #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - Entry in [jobid]_log.log [file]: Log file. #
# #
########################################################################################################################
def f_success(vsavefolder, vjobid):
f_write_log(vsavefolder, vjobid, 'success.\n', 'a')
########################################################################################################################
# #
# f_no #
# Writes no.\n into log file. #
# #
# Mandatory arguments: #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - Entry in [jobid]_log.log [file]: Log file. #
# #
########################################################################################################################
def f_no(vsavefolder, vjobid):
f_write_log(vsavefolder, vjobid, 'no.\n', 'a')
########################################################################################################################
# #
# f_write_log #
# Writes a log file to give output if something breaks. #
# #
# Mandatory arguments: #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# - vmessage [string]: Message that is written into the file. #
# - vmode [string]: Should either be 'w' for write or 'a' for append. #
# #
# Output: #
# - [jobid]_log.log [file]: Log file. #
# #
########################################################################################################################
def f_write_log(vsavefolder, vjobid, vmessage, vmode):
vfh_c = open(join(vsavefolder, vjobid + '_log.log'), vmode)
vfh_c.write(vmessage)
vfh_c.close()
########################################################################################################################
# #
# f_write_cookie #
# Writes a cookie file to give output to where the job currently is. #
# #
# Mandatory arguments: #
# - vid [int]: Id of the cookie to be written. #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# - vmessage [string]: Message that is written into the file. #
# #
# Output: #
# - cookie_[id] [file]: Cookie indicating job progress (id) or failure (-1).                                          #
# #
########################################################################################################################
def f_write_cookie(vid, vsavefolder, vjobid, vmessage):
vfh_c = open(join(vsavefolder, vjobid + '_cookie_' + str(vid)), 'w')
vfh_c.write(vmessage + '\n')
vfh_c.close()
########################################################################################################################
# #
# f_get_hex #
# Converts a 3 value tuple into hex code (tuple is assumed to be between 0 and 1). #
# #
# Mandatory arguments: #
# - vtuple [list]: a list of numbers e.g. (0,0.5,0.912). #
# #
# Output: #
# - [string]: hex color code e.g. #af0010. #
# #
########################################################################################################################
def f_get_hex(vtuple):
vr = '%02x' % int(round(vtuple[0] * 255))
vg = '%02x' % int(round(vtuple[1] * 255))
vb = '%02x' % int(round(vtuple[2] * 255))
return '#' + vr + vg + vb
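# Illustrative check (not part of the pipeline): a matplotlib RGB(A) facecolor tuple with
# channels in [0, 1] maps to its hex string, e.g.
#   f_get_hex((0, 0.5, 0.912)) -> '#0080e9'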
########################################################################################################################
# #
# f_read_in_ignoredomainfile #
# Reads a file on where each line is assumed to either be the name of a Prosite or a PFAM domain. This list is then #
# later used to ignore the domains when making figures. #
# #
# Mandatory arguments: #
# - vfile [string]: Should indicate a text file containing the domain names to ignore. #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - vignore_domain [list]: Strings representing domain names of Prosite or PFAM. #
# #
########################################################################################################################
def f_read_in_ignoredomainfile(vfile, vsavefolder, vjobid):
vignore_domain = []
try:
vfile_fh = open(vfile, 'r')
for vline in vfile_fh:
vline = re.sub("[\n\r]", "", vline)
vsplit = vline.split('\t')
vignore_domain.append(vsplit[0])
vfile_fh.close()
except IOError:
f_write_log(vsavefolder, vjobid, 'Can not read ignore domain file ' + vfile + '\n', 'a')
print('Can not read file: ' + vfile)
# Write kill cookie
f_write_cookie(-1, vsavefolder, vjobid, 'Can not read ignore domain file ' + vfile)
sys.exit()
return vignore_domain
########################################################################################################################
# #
# f_read_in_colorfile #
# Reads a tab separated file in where each line is assumed to first contain a Prosite or PFAM domain, and the second #
# column contains a hexcode for a color to be applied in the figures. #
# #
# Mandatory arguments: #
# - vfile [string]: Should indicate a text file containing the domain color matchings. #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - vcolor_domain [list]: Strings representing domain names of Prosite or PFAM. #
# - vcolor_hexcode [list]: Hex codes for the coloring of the domains. e.g. #af0010. #
# #
########################################################################################################################
def f_read_in_colorfile(vfile, vsavefolder, vjobid):
vcolor_domain = []
vcolor_hexcode = []
try:
vfile_fh = open(vfile, 'r')
for vline in vfile_fh:
vline = re.sub("[\n\r]", "", vline)
vsplit = vline.split('\t')
vcolor_domain.append(vsplit[0])
vcolor_hexcode.append(vsplit[1])
vfile_fh.close()
except IOError:
f_write_log(vsavefolder, vjobid, 'Can not read color file ' + vfile + '\n', 'a')
print('Can not read color file: ' + vfile)
# Write kill cookie
f_write_cookie(-1, vsavefolder, vjobid, 'Can not read color file ' + vfile)
sys.exit()
return vcolor_domain, vcolor_hexcode
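# Illustrative input for f_read_in_colorfile (hypothetical domain names; tab-separated,
# one "<domain>\t<hex color>" pair per line):
#   Pkinase\t#1f77b4
#   EGF_CA\t#ff7f0e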
########################################################################################################################
# #
# f_read_in_groupfile #
# Reads a tab separated file in where each line is assumed to first contain a sequence header, and the second column #
# contains a group identifier by which the sequences should be grouped by. #
# #
# Mandatory arguments: #
# - vfile [string]: String indicating a text file containing the domain color matchings. #
# - vheaders [list]: Fasta headers that should be used to search for domain associations. #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - vgroup [list]: Group associations that match to the sequence headers. #
# - vgroup_u [list]: the same list as above, but as a unique set, to later ensure that a independent set of unique #
# groups can be made. #
# #
########################################################################################################################
def f_read_in_groupfile(vfile, vheaders, vsavefolder, vjobid):
vgroup = []
vgroup_u = []
try:
for vitem in vheaders:
vfile_fh = open(vfile, 'r')
vhit = 0
for vline in vfile_fh:
vline = re.sub("[\n\r]", "", vline)
vsplit = vline.split('\t')
if vsplit[0] == vitem:
if vhit == 0:
vgroup.append(vsplit[1])
vgroup_u.append(vsplit[1])
vhit += 1
else:
print('Warning: group domain file has more than one entry for ' + vitem + '. Only first '
'instance is used.')
f_write_log(vsavefolder, vjobid, 'Warning: group domain file has more than one entry '
'for ' + vitem + '. Only first instance is used.\n', 'a')
vfile_fh.close()
if vhit == 0:
f_write_log(vsavefolder, vjobid, 'Warning: no protein group entry found for ' + vitem +
'. Please add one in ' + vfile + '\nUsing "ProteinGroup" as group name'
' instead.\n', 'a')
print('Warning: no protein group entry found for ' + vitem + '. Please add one in ' + vfile)
vgroup.append('ProteinGroup')
vgroup_u.append('ProteinGroup')
except IOError:
f_write_log(vsavefolder, vjobid, 'Unable to read protein group file ' + vfile + '\n', 'a')
print('Can not read protein group file: ' + vfile)
# Write kill cookie
f_write_cookie(-1, vsavefolder, vjobid, 'Can not read protein group file: ' + vfile)
sys.exit()
return vgroup, vgroup_u
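# Illustrative input for f_read_in_groupfile (hypothetical entries; tab-separated,
# one "<fasta header>\t<group name>" pair per line; headers must match vheaders exactly):
#   sp|P00001|PROT1_HUMAN\tReceptors
#   sp|P00002|PROT2_HUMAN\tTransporters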
########################################################################################################################
# #
# f_read_tsv #
# Reads a tab separated file and gives back a pandas data frame with the information stored in the tsv. #
# #
# Mandatory arguments: #
# - vfile [string]: String indicating a tsv file. #
# - vsavefolder [string]: Indicates the location of the folder where result files should be saved. #
# - vjobid [string]: Id of the run. Is used to produce the output file names. #
# #
# Output: #
# - vtable [pandas data frame]: pandas data frame containing the information of the tsv. #
# #
########################################################################################################################
def f_read_tsv(vfile, vsavefolder, vjobid):
try:
vinit = True
vtable = pd.DataFrame()
vfile_fh = open(vfile, 'r')
vw = -1
vn_split = 0
for vline in vfile_fh:
if vline.find('\t') != -1:
vline = re.sub("[\n\r]", "", vline)
vsplit = vline.split('\t')
if vinit:
                    vtable = pd.DataFrame(vsplit)
import time
import pandas as pd
import numpy as np
import gc
from os.path import join as opj
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
import torchvision
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from dataset import HuBMAPDatasetTrain
from models import build_model
from scheduler import CosineLR
from utils import elapsed_time
from lovasz_loss import lovasz_hinge
from losses import criterion_lovasz_hinge_non_empty
from metrics import dice_sum, dice_sum_2
from get_config import get_config
config = get_config()
output_path = config['OUTPUT_PATH']
fold_list = config['FOLD_LIST']
pretrain_path_list = config['pretrain_path_list']
device = config['device']
def feature_imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.detach().numpy().transpose((1, 2, 0))
# mean = np.array([0.5, 0.5, 0.5])
# std = np.array([0.5, 0.5, 0.5])
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
# inp = STD * inp + MEAN
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.pause(0.001) # pause a bit so that plots are updated
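# The sketch below is a hypothetical, self-contained demo of feature_imshow; it is not called
# anywhere in this training script and only exists to show the expected input format.
def _demo_feature_imshow():
    """Minimal sketch: build a random 16-channel feature map, tile it into a single
    image with torchvision's make_grid and display it via feature_imshow."""
    dummy = torch.rand(16, 1, 32, 32)  # 16 single-channel 32x32 feature maps
    grid = torchvision.utils.make_grid(dummy, nrow=4, normalize=True)  # -> (3, H, W) tensor
    feature_imshow(grid, title='dummy feature maps')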
def run(seed, data_df, pseudo_df, trn_idxs_list, val_idxs_list):
log_cols = ['fold', 'epoch', 'lr',
'loss_trn', 'loss_val',
'trn_score', 'val_score',
'elapsed_time']
criterion = nn.BCEWithLogitsLoss().to(device)
criterion_clf = nn.BCEWithLogitsLoss().to(device)
for fold, (trn_idxs, val_idxs) in enumerate(zip(trn_idxs_list, val_idxs_list)):
if fold in fold_list:
pass
else:
continue
print('seed = {}, fold = {}'.format(seed, fold))
log_df = pd.DataFrame(columns=log_cols, dtype=object)
log_counter = 0
#dataset
trn_df = data_df.iloc[trn_idxs].reset_index(drop=True)
val_df = data_df.iloc[val_idxs].reset_index(drop=True)
#add pseudo label
if pseudo_df is not None:
trn_df = pd.concat([trn_df, pseudo_df], axis=0).reset_index(drop=True)
# dataloader
valid_dataset = HuBMAPDatasetTrain(val_df, config, mode='valid')
valid_loader = DataLoader(valid_dataset, batch_size=config['test_batch_size'],
shuffle=False, num_workers=4, pin_memory=True)
#model
model = build_model(model_name=config['model_name'],
resolution=config['resolution'],
deepsupervision=config['deepsupervision'],
clfhead=config['clfhead'],
clf_threshold=config['clf_threshold'],
load_weights=True).to(device, torch.float32)
# if pretrain_path_list is not None:
# model.load_state_dict(torch.load(pretrain_path_list[fold]))
# print("pre-trained models loaded")
# for p in model.parameters():
# p.requires_grad = True
optimizer = optim.Adam(model.parameters(), **config['Adam'])
#optimizer = optim.RMSprop(model.parameters(), **config['RMSprop'])
# Creates a GradScaler once at the beginning of training.
scaler = torch.cuda.amp.GradScaler()
if config['lr_scheduler_name']=='ReduceLROnPlateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **config['lr_scheduler']['ReduceLROnPlateau'])
elif config['lr_scheduler_name']=='CosineAnnealingLR':
#scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
scheduler = CosineLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
elif config['lr_scheduler_name']=='OneCycleLR':
            # NOTE: this branch assumes a training DataLoader named `train_loader` already exists; in this
            # script the training loader is presumably rebuilt from the re-balanced trn_df inside the epoch
            # loop, so steps_per_epoch should be taken from that loader (or estimated from len(trn_df)).
            scheduler = optim.lr_scheduler.OneCycleLR(optimizer, steps_per_epoch=len(train_loader),
                                                      **config['lr_scheduler']['OneCycleLR'])
#training
val_score_best = -1e+99
val_score_best2 = -1e+99
loss_val_best = 1e+99
epoch_best = 0
counter_ES = 0
trn_score = 0
trn_score_each = 0
start_time = time.time()
for epoch in range(1, config['num_epochs']+1):
if epoch < config['restart_epoch_list'][fold]:
scheduler.step()
continue
# if elapsed_time(start_time) > config['time_limit']:
# print('elapsed_time go beyond {} sec'.format(config['time_limit']))
# break
#print('lr = ', scheduler.get_lr()[0])
print('lr : ', [ group['lr'] for group in optimizer.param_groups ])
#train
trn_df['binned'] = trn_df['binned'].apply(lambda x:config['binned_max'] if x>=config['binned_max'] else x)
n_sample = trn_df['is_masked'].value_counts().min()
trn_df_0 = trn_df[trn_df['is_masked']==False].sample(n_sample, replace=True)
trn_df_1 = trn_df[trn_df['is_masked']==True].sample(n_sample, replace=True)
n_bin = int(trn_df_1['binned'].value_counts().mean())
trn_df_list = []
for bin_size in trn_df_1['binned'].unique():
trn_df_list.append(trn_df_1[trn_df_1['binned']==bin_size].sample(n_bin, replace=True))
trn_df_1 = pd.concat(trn_df_list, axis=0)
            trn_df_balanced = pd.concat([trn_df_1, trn_df_0], axis=0)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
# ----------------------
# Main class
# ----------------------
class solveCovid:
def __init__(self,iso2: str): # eg 'US'
self.iso2 = iso2
# Policy strategies for forecast
self.policy = 'optim' # ['optim', 'linear']
self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
# Infection rate model for forecast
self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
self.gamma_shock_length = 10 # Shock gamma_tilde for x days
self.gamma_shock_depth = 0.5 # Daily increment of gamma
self.default_init_single = default_init_single
self.default_bounds_single = default_bounds_single
# Vaccine assumptions
self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
self.effi_one = 0.5 # Efficacy after one dose in %
self.effi_two = 0.95 # Efficacy after two doses in %
self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
self.vac_better_cover = 1.3
self.vac_better_delayedstart = '2021-06-30'
self.vac_better_delayedcover = 1
# Reinfection and loss of immunity
self.reinfect = 'immune' # ['immune','reinfect']
        self.r_re1_R = np.log(2)/10000   # Baseline: immunity of R decays with a half-life of 10000 days (i.e. essentially permanent)
        self.r_re1_V = np.log(2)/10000   # Baseline: immunity of V decays with a half-life of 10000 days (i.e. essentially permanent)
        self.r_re2_R = np.log(2)/60      # Downside risk: immunity of R decays with a half-life of 60 days (roughly 1.2% of R lose immunity each day)
        self.r_re2_V = np.log(2)/60      # Downside risk: immunity of V decays with a half-life of 60 days (roughly 1.2% of V lose immunity each day)
# Death probabilities
self.pdth_assump = 'martingale' # ['martingale','treatment']
        self.pdth_min = 0.005            # Lowerbound on death probability - authorities in countries with very few cases still perceive a non-zero death probability
self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
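        # Worked example of the half-life parametrisation above: with pdth_halflife = 60,
        # pdth_theta = exp(-ln(2)/60) ~= 0.9885, so under the 'treatment' assumption the gap
        # between the current death probability and pdth_min is halved every 60 days.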
# --------------- 1. Preliminary: Get the data ------------------------
def prelim(self):
iso2 = self.iso2
self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
'total_cases','total_deaths','new_cases','new_deaths',
'google_smooth','icu_patients','hosp_patients','reproduction_rate',
'new_tests','tests_per_case','aged_70_older',
'vac_total','vac_people',
'vac_fully']][df1['total_cases'][iso2] > virus_thres]
df2 = df2.droplevel('iso2',axis=1)
df2['vac_total'] = df2['vac_total'].interpolate()
df2['vac_people'] = df2['vac_people'].interpolate()
if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
df2['vac_partial'] = 0.8 * df2['vac_total']
df2['vac_fully'] = 0.2 * df2['vac_total']
else : # For most countries,
date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
df2['vac_fully'] = df2['vac_fully'].interpolate()
df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
PopulationI = df2['total_cases'][0]
PopulationD = df2['total_deaths'][0]
if PopulationD==0:
PopulationD = 0
PopulationR = 5
else:
PopulationR = PopulationD * 5
PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
self.cases_data_fit = df2['total_cases'].tolist()
self.deaths_data_fit = df2['total_deaths'].tolist()
self.newcases_data_fit = df2['new_cases'].tolist()
self.newdeaths_data_fit = df2['new_deaths'].tolist()
self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
date_day_since100 = pd.to_datetime(df2.index[0])
self.maxT = (default_maxT - date_day_since100).days + 1
self.mobility_vec = df2['google_smooth'].values
self.T = len(df2)
self.t_cases = np.arange(0,self.T)
self.mobility_interp = interp1d(self.t_cases,self.mobility_vec,bounds_error=False,fill_value=0.,kind='cubic')
self.GLOBAL_PARAMS = (self.N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v)
self.gamma_0_days = 1 # average of gamma_t during first n days becomes the target
# Compute vaccination parameters
self.vac_partial = df2['vac_partial'].values
self.vac_fully = df2['vac_fully'].values
#self.vac_contracted = 1000*df_vac.loc[iso2]['No. of people covered (thousands)']/self.N
df2['V_'] = self.N * (self.effi_one*df2['vac_partial']
+ self.effi_two*df2['vac_fully'])/100 # V = expected number of effectively vaccinated persons
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df_v = df2.reindex(ix)
# Vaccination assumptions
if self.iso2 in ['GB','US']:
vac_scale = 1
elif self.iso2 in ['BE','FR','DE','IT','NL','PL','SG','ES','CH','RO','CL','CA']:
vac_scale = 0.8
elif self.iso2 in ['AU','SA','SE','TR']:
vac_scale = 0.65
elif self.iso2 in ['AR','BR','MX','RU']:
vac_scale = 0.50
elif self.iso2 in ['ID','IN','JP','KR','MY','TH']:
vac_scale = 0.25
elif self.iso2 in ['ZA']:
vac_scale = 0.10
else:
vac_scale = 0.50
print('Missing vaccine assumption for selected country')
if self.vac_assump == 'vac_base':
if df2['V_'][-1] > 0: # already started
df_v['V_'].loc['2021-12-31'] = self.vac_base_cover * vac_scale * self.N
            elif df2['V_'][-1] == 0:  # If vaccination has not started, assume it starts on vac_base_delayedstart and reaches vac_base_delayedcover by year end
df_v['V_'].loc[self.vac_base_delayedstart] = 100 # 100 = assumed number of effectively vaccinated on first day
df_v['V_'].loc['2021-12-31'] = self.vac_base_delayedcover* vac_scale*self.N # partial orders filled by year end
elif self.vac_assump == 'vac_worse':
if df2['V_'][-1] > 0:
df_v['V_'].loc['2021-12-31'] = self.vac_worse_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_worse_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_worse_delayedcover* vac_scale*self.N
elif self.vac_assump == 'vac_better':
if df2['V_'][-1]>0:
df_v['V_'].loc['2021-12-31'] = self.vac_better_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_better_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_better_delayedcover* vac_scale*self.N
df_v['V_'] = df_v['V_'].interpolate()
df_v['V_'] = df_v['V_'].clip(0,self.N)
self.df2 = df2
self.df_v = df_v
print(f'Data preparation for {iso2} done')
# --------------------------3 . SEIR model ------------------
def step_seir(self, t, x, gamma_t, p_dth) -> list:
"""
SEIR model building on DELPHI v.3
        Features 17 distinct states (the original compartments plus a vaccinated state V),
        taking into account undetected cases, deaths, hospitalizations and recoveries
        [0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D,
        11 TH, 12 DVR, 13 DVD, 14 DD, 15 DT, 16 V]
"""
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT, V = x
r_v = self.df_v['V_'].iloc[t+1] - self.df_v['V_'].iloc[t]
# Reinfection parameters
if self.reinfect == 'immune':
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
elif self.reinfect == 'reinfect':
if t <= self.T:
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
else:
r_re_R = self.r_re2_R
r_re_V = self.r_re2_V
# Vaccination recipients (S, or S+R)
if self.vac_receiver == 'S only':
zeta = 1
elif self.vac_receiver == 'S+R':
zeta = S/(S+R)
else:
print('Re-specify vaccine recipient choice')
# Main equations
S1 = S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V - r_v * zeta
if S1 < 0: # Vaccination reaches saturating point
S1 = 0
r_v = (S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V) /zeta
E1 = E + gamma_t * S * I / self.N - r_i * E
I1 = I + r_i * E - r_d * I
AR1 = AR + r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
DHR1 = DHR + r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
DQR1 = DQR + r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
AD1 = AD + r_d * p_dth * (1 - p_d) * I - r_dth * AD
DHD1 = DHD + r_d * p_dth * p_d * p_h * I - r_dth * DHD
DQD1 = DQD + r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
R1 = R + r_ri * (AR + DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
D1 = D + r_dth * (AD + DQD + DHD)
# Helper states
TH1 = TH + r_d * p_d * p_h * I
DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
DD1 = DD + r_dth * (DHD + DQD)
DT1 = DT + r_d * p_d * I
V1 = V + r_v -r_re_V*V
x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
return x1
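    # For reference, the discrete-time susceptible update implemented in step_seir is
    #   S_{t+1} = S_t - gamma_t * S_t * I_t / N + r_re_R * R_t + r_re_V * V_t - r_v * zeta,
    # where r_v is the day's flow of effectively vaccinated people (taken from df_v) and zeta is
    # the share of those doses going to S (1 if 'S only', S/(S+R) if 'S+R'); the clipping at
    # S_{t+1} = 0 simply caps vaccination once no susceptibles are left.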
# ------------------ X. Construct initial conditions
def initial_states_func(self,k):
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
E_0 = PopulationCI / p_d * k
I_0 = PopulationCI / p_d * k
UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
DHD_0 = PopulationCI * p_h * p_dth0
DQD_0 = PopulationCI * (1 - p_h) * p_dth0
R_0 = PopulationR / p_d
D_0 = PopulationD / p_d
S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
TH_0 = PopulationCI * p_h
DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
DD_0 = PopulationD
DT_0 = PopulationI
V_0 = 0
x_init = [
S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
]
return x_init
    # Find k such that the implied initial gamma_0 matches r_d*6 (equivalent to R0 = 6, approx. 2.08)
def loss_gamma0(self,k):
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
x_init = self.initial_states_func(k)
(S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0) = x_init
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(self.gamma_0_days): # Target first n days
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
x_0 = x_1
gamma_t_vec.append(gamma_t)
gamma_0 = np.mean(gamma_t_vec)
loss = (gamma_0 - (r_d*6) )**2 # gamma_0 equivalent to R0=6 is 2.08
return loss
def fit_gamma0(self):
output = dual_annealing(
self.loss_gamma0,
x0 = [5],
bounds = [(1,50)],
)
k_star = output.x
return k_star
def get_initial_conditions(self):
if Path(f'../params/param_fixed/kstar.csv').exists():
df = pd.read_csv(f'../params/param_fixed/kstar.csv')
kstar = df[self.iso2].values[0]
else:
kstar = self.fit_gamma0()[0] # find kstar that matches gamma_0 to target
x_init = self.initial_states_func(kstar)
return x_init
# -------------------- x. Implied gamma_t and pdth_t in-sample -------------------
def gamma_t_compute(self):
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
p_dth_vec = []
x_init = self.get_initial_conditions()
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_init
S_vec = [S_0]
E_vec = [E_0]
I_vec = [I_0]
DT_vec = [DT_0]
DD_vec = [DD_0]
DHR_vec = [DHR_0]
DHD_vec = [DHD_0]
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(len(newcases)):
# Work backwards to compute 'exact' gamma_t and p_dth
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_1
x_0 = x_1
gamma_t_vec.append(gamma_t)
p_dth_vec.append(p_dth)
S_vec.append(S_0)
I_vec.append(I_0)
E_vec.append(E_0)
DT_vec.append(DT_0)
DD_vec.append(DD_0)
DHR_vec.append(DHR_0)
DHD_vec.append(DHD_0)
self.df2['gamma_t'] = gamma_t_vec
self.df2['pdth_t'] = p_dth_vec
self.S_vec = S_vec # In-sample estmates, useful for phi calculation later on
self.I_vec = I_vec
self.DHR_vec = DHR_vec # For fitting death probability
self.DHD_vec = DHD_vec
HD_HR = np.array(self.DHR_vec) + np.array(self.DHD_vec)
self.df2['HD_HR'] = 100*HD_HR[:-1]/self.N
# gamma_t_sm = uniform_filter1d(gamma_t_vec, size=6, mode='nearest')
# self.df2['gamma_sm'] = gamma_t_sm
return gamma_t_vec, p_dth_vec
# -------------------- x. Estimating the model -----------
def gamma_func(self, params):
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(m_t))
beta0, beta1 = params
gamma_vec = beta0*np.exp(beta1* m_t)
return gamma_vec
def loss_betas(self, params) -> float:
gamma_model = self.gamma_func(params)
loss = sum( (self.df2['gamma_t'].values[:len(gamma_model)] - gamma_model)**2 )
return loss
def fitmodel(self):
# A. Fit beta0 and beta1
x0 = self.default_init_single
bounds_0 = self.default_bounds_single
output = dual_annealing(
self.loss_betas,
x0 = x0,
bounds = bounds_0,
)
best_betas = output.x
self.best_betas = best_betas
# B. Fit the residual (gamma_tilde) to AR models
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(self.df2))
beta0, beta1 = self.best_betas
self.df2['gamma_mob'] = beta0*np.exp(beta1* m_t)
self.df2['gamma_tilde'] = self.df2['gamma_t'] - self.df2['gamma_mob']
self.df2['gamma_tilde_sm'] = uniform_filter1d(self.df2['gamma_tilde'],
size=21, mode='reflect')
self.df2['gamma_tilde_resid'] = self.df2['gamma_tilde'] - self.df2['gamma_tilde_sm']
y = self.df2['gamma_tilde_sm']
self.df2['gamma_tilde_sm_lag1'] = self.df2['gamma_tilde_sm'].shift(1) # No constant term
self.df2['gamma_tilde_sm_lag2'] = self.df2['gamma_tilde_sm'].shift(2)
reg_AR1 = sm.OLS(y,self.df2['gamma_tilde_sm_lag1'],missing='drop').fit()
reg_AR2 = sm.OLS(y,self.df2[['gamma_tilde_sm_lag1','gamma_tilde_sm_lag2']],missing='drop').fit()
best_rho1 = reg_AR1.params[0]
best_rho1 = np.clip(best_rho1, 0.1, 0.99) #Assume stationarity
best_rho2 = reg_AR2.params[:]
best_params = np.array([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])
self.best_rho1 = best_rho1
self.best_rho2 = best_rho2
self.best_params = best_params
# C. Empirically fit phi for optimal policy to last observation
if self.phi_option == 'fit':
m = self.df2['google_smooth'][-15:].mean() # Take average of last 15 days to smooth volatility
s = self.S_vec[-1]/self.N
i = self.I_vec[-1]/self.N
gamma_tilde = self.df2['gamma_tilde'][-1]
pdth = self.df2['pdth_t'][-1]
pdth = max(pdth, self.pdth_min) # Get around cases where pdth=0 for countries with very few cases
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m)))
phi = -(LHS1 * LHS2)/m
self.phi = max(phi, self.phi_min)
elif self.phi_option == 'exo':
self.phi = self.phi_exo
return best_params
# ------------------ x. Forecasts ---------------------------
def step_gamma_tilde(self, gamma_tilde_lag1, gamma_tilde_lag2, model='AR1'):
if model =='AR1':
return self.best_rho1*gamma_tilde_lag1
elif model =='AR2':
return self.best_rho2[0]*gamma_tilde_lag1 + self.best_rho2[1]*gamma_tilde_lag2
def mobility_choice(self,x,gamma_tilde,pdth):
if self.policy == 'constant':
mob = self.poparam_constant
elif self.policy == 'linear-I': # Respond linearly to infection level
mob = self.poparam_linear_I[0] + self.poparam_linear_I[1]*x[2]
elif self.policy == 'linear-dI': # Respond to new infections
dI = r_i*x[1] - r_d*x[2] # x[1]=E, x[2]=I
mob = self.poparam_linear_dI[0] + self.poparam_linear_dI[1]*dI
elif self.policy == 'optim': # Analytical optimal policy based on simplified model and quadratic losses
beta0 = self.best_params[0]
beta1 = self.best_params[1]
phi = self.phi
s = x[0]/self.N
i = x[2]/self.N
m_set = np.linspace(-1,0,101)
RHS = -phi*m_set
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m_set))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m_set)))
LHS = LHS1 * LHS2
m_id = np.argmin(np.abs(RHS-LHS))
mob = m_set[m_id]
return mob
def fatality_factor(self,V): # Factor to adjust 'base' fatality prob
idx = (f_table[self.iso2]['vaccine_%'] - V/self.N).abs().argmin() # Find idx to look up in fatality table
factor = f_table[self.iso2]['fatality_ratio'][idx]
return factor
def sim_seir(self):
df2 = self.df2
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df3 = df2.reindex(ix)
x_init = self.get_initial_conditions()
x_data = np.array(x_init)
gamma_tilde_fc = self.df2['gamma_tilde'].values
gamma_tilde_sm_fc = self.df2['gamma_tilde_sm'].values
pdth_t_targ = [] # Death prob when vaccines are targeted
pdth_t_base = [] # Base death prob if vaccines are given randomly
pdth_t_fc = self.df2['pdth_t'].values
pdth_t_base_fc = pdth_t_fc.copy()
gamma_mob_fc = self.df2['gamma_mob'].values
mob_fc = self.df2['google_smooth'].values
# Load parameters
if hasattr(self, 'best_params'):
beta0, beta1, rho, rhos_1, rhos_2 = self.best_params
else:
df_param = pd.read_csv(f'../params/{param_load_folder}/param_est.csv')
beta0, beta1, rho, rhos_1, rhos_2 = df_param[self.iso2]
for t in range(self.maxT):
factor = self.fatality_factor(x_init[-1])
eta = self.target_weight
if t<len(self.df2): # In sample
pdth_t = pdth_t_fc[t]
pdth_base = pdth_t/(eta*factor + 1-eta)
pdth_targ = factor*pdth_base
# if t==len(self.df2): # Parse pdth_base of hospitalised/N
# y = pdth_t_base
# X = self.df2['HD_HR'].shift(30) # Use lagged hospitalised as the predictor
# X = sm.add_constant(X)
# reg_pdth = sm.OLS(y,X, missing='drop').fit()
# thetas = reg_pdth.params
# self.best_theta = thetas
# pdb.set_trace()
# pdth_t_basex = y - thetas[0] - thetas[1]*X # Base death prob, parsed of hospitalisation wave
# self.df2['pdth_base'] = pdth_t_base
# self.df2['pdth_base_x'] = pdth_t_basex
if t>len(self.df2)-1: # Out of sample
# Death probability
if self.pdth_assump == 'martingale': # Martingale death rate
pdth_base = pdth_t_base[-1]
elif self.pdth_assump == 'treatment': # Death prob slowly declines to assumed minimum and assumed halflife
pdth_base = self.pdth_theta*pdth_t_base[-1] + (1-self.pdth_theta)*self.pdth_min
pdth_base = max(pdth_base, self.pdth_min) # To get around pdth=0 for countries with very few cases
pdth_t = (eta*factor + 1-eta)*pdth_base
pdth_targ = factor*pdth_base
# Gamma_tilde
if self.gamma_tilde_model == 'AR1':
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
elif self.gamma_tilde_model == 'AR2':
gamma_tilde = rhos_1*gamma_tilde_sm_fc[t-1] + rhos_2*gamma_tilde_sm_fc[t-2]
elif self.gamma_tilde_model =='shock':
if t < len(self.df2) + self.gamma_shock_length:
gamma_tilde = gamma_tilde_sm_fc[len(self.df2)-1] + self.gamma_shock_depth
else:
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
# Mobility and overall gamma_t
mob_t = self.mobility_choice(x_init, gamma_tilde, pdth_t)
mob_t = max(mob_t, max_lockdown)
gamma_mob_t = beta0*np.exp(beta1*mob_t)
gamma_t = gamma_tilde + gamma_mob_t
# Append to data array
gamma_tilde_sm_fc = np.append(gamma_tilde_sm_fc, gamma_tilde)
gamma_tilde_fc = np.append(gamma_tilde_fc, gamma_tilde)
gamma_mob_fc = np.append(gamma_mob_fc, gamma_mob_t)
mob_fc = np.append(mob_fc, mob_t)
pdth_t_fc = np.append(pdth_t_fc, pdth_t)
pdth_t_base.append(pdth_base)
pdth_t_targ.append(pdth_targ)
# For in sample, use 'true' inputs
gamma_t = gamma_tilde_fc[t] + gamma_mob_fc[t]
p_dth = pdth_t_fc[t]
if t < range(self.maxT)[-1]: # Stop forecasting at the final period
x_next = self.step_seir(t, x_init, gamma_t, p_dth)
x_data = np.vstack((x_data, np.array(x_next)))
x_init = x_next
# Fill dataframe
col_temp = ['S', 'E', 'I', 'AR', 'DHR', 'DQR', 'AD', 'DHD', 'DQD', 'R', 'D', 'TH', 'DVR', 'DVD', 'DD', 'DT', 'V']
df4 = pd.DataFrame(x_data, columns=col_temp, index=df3.index)
df3 = df3.merge(df4, how='left', left_index=True, right_index=True)
df3['gamma_tilde_fc'] = gamma_tilde_fc
df3['gamma_mob_fc'] = gamma_mob_fc
df3['gamma_t_fc'] = df3['gamma_tilde_fc'] + df3['gamma_mob_fc']
df3['mob_fc'] = mob_fc
df3['pdth_t_fc'] = pdth_t_fc
df3['pdth_t_base'] = np.array(pdth_t_base)
df3['pdth_t_targ'] = np.array(pdth_t_targ)
df3[['S_N','I_N','DT_N','DD_N','V_N']] = df3[['S','I','DT','DD','V']]/self.N
self.df3 = df3
return df3
# ------------------ 5. Predict and plot ---------------------
def plot_all(self, saveplot=False):
df = self.df3
transpa = 0.0
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15,8), constrained_layout=True)
# df_bar = df_bar0[['GDP lost','Total deaths']]
# df_bar.plot(kind='bar', ax=ax[1,2], secondary_y='Total deaths', rot=0, legend=False)
# ax[1,2].set_ylabel('percent')
# ax[1,2].right_ax.set_ylabel('per million')
# ax[1,2].set_title('Losses of lives and output',fontsize='x-large')
# L = [mpatches.Patch(color=c, label=col)
# for col,c in zip( ('GDP loss','Deaths (rhs)'), plt.rcParams['axes.prop_cycle'].by_key()['color'])]
# ax[1,2] = plt.legend(handles=L, loc=1, framealpha=transpa)
ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,0].set_title('Cases',fontsize='x-large')
ax[0,0].set(ylabel = '% of population')
ax2 = ax[0,0].twinx()
ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
lines, labels = ax[0,0].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
#ax2.set(ylabel='% of population')
ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,1].set_title('Deaths',fontsize='x-large')
ax[0,1].set(ylabel='% of population')
ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
ax[0,2].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
ax[0,2].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
ax[0,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,2].set_title('Susceptible & vaccinated',fontsize='x-large')
ax[0,2].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[0,2].set(ylabel='% of population')
ax[1,0].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
ax[1,0].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
ax[1,0].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
ax[1,0].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
ax[1,0].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
ax[1,0].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,0].set_title('Infection rate',fontsize='x-large')
ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[1,1].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
ax[1,1].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[1,1].set_title('Activity',fontsize='x-large')
ax[1,1].set(ylabel='% deviations from norm')
ax[1,2].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
ax[1,2].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
ax[1,2].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
ax[1,2].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
ax[1,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,2].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[1,2].set_title('Death probability',fontsize='x-large')
ax[1,2].set(ylabel='%')
plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,2].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,2].get_xticklabels(), rotation=30, horizontalalignment='right')
cname = coco.convert(names=self.iso2,to='name_short')
fig.suptitle(f'{cname}-{self.vac_assump}-{self.reinfect}',fontsize='xx-large')
if saveplot:
Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
fig.savefig(f'../pics/fig_{date.today()}/{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.png')
return fig
def plot_portrait(self, saveplot=False):
df = self.df3
transpa = 0.0
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10,12), constrained_layout=True)
ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,0].set_title('Cases',fontsize='x-large')
ax[0,0].set(ylabel = '% of population')
ax2 = ax[0,0].twinx()
ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
ax2.grid(None)
lines, labels = ax[0,0].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
#ax2.set(ylabel='% of population')
ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,1].set_title('Deaths',fontsize='x-large')
ax[0,1].set(ylabel='% of population')
ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
ax[1,0].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
ax[1,0].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,0].set_title('Susceptible & vaccinated',fontsize='x-large')
ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[1,0].set(ylabel='% of population')
ax[1,1].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
ax[1,1].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
ax[1,1].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
ax[1,1].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
ax[1,1].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
ax[1,1].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,1].set_title('Infection rate',fontsize='x-large')
ax[1,1].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[2,0].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
ax[2,0].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
ax[2,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[2,0].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[2,0].set_title('Mobility',fontsize='x-large')
ax[2,0].set(ylabel='% deviations from norm')
ax[2,1].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
ax[2,1].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
ax[2,1].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
ax[2,1].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
ax[2,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[2,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[2,1].set_title('Death probability',fontsize='x-large')
ax[2,1].set(ylabel='%')
plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[2,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[2,1].get_xticklabels(), rotation=30, horizontalalignment='right')
cname = coco.convert(names=self.iso2,to='name_short')
fig.suptitle(f'{cname}',fontsize=18)
if saveplot:
Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
fig.savefig(f'../pics/fig_{date.today()}/Portrait-{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.pdf')
return fig
# ---------------------------------------------
# Calling functions
# ---------------------------------------------
# -----------------------------------------
# x. Prelim parameters estimation
# Estimate k_star and save in file (only need to do this once)
def estimate_kstar(cset=['US']):
    kstar_dict = {'Parameter': ['kstar']}
    for c in cset:
        tmp = solveCovid(c)
        tmp.prelim()
        kstar = tmp.fit_gamma0()
        kstar_dict[c] = kstar
    df = pd.DataFrame(kstar_dict)
df.to_csv(f'../params/param_fixed/kstar.csv',index=False)
return df
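# Illustrative call (assumes the data frames loaded in param_simple are available):
#   estimate_kstar(cset=['US', 'DE', 'GB'])   # writes ../params/param_fixed/kstar.csv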
# -------------------------
# x. Run complete package under scenarios: estimate, forecast, plot, save
def run_baseline(cset=['US']):
p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}
for c in cset:
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
p_dict[c] = np.append(tmp.best_params, 1e9*tmp.phi)
tmp.sim_seir()
tmp.plot_all(saveplot='False')
tmp.df3.to_csv(f'../output/{out_save_folder}/df3_{tmp.iso2}.csv')
pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',float_format='%.4f',index=False)
def run_gammashock(cset=['US']):
for c in cset:
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.gamma_tilde_model = 'shock'
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_vaccines(cset=['US'],vac_assump='vac_worse'):
for c in cset:
tmp = solveCovid(c)
tmp.vac_assump = vac_assump
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_reinfect(cset=['US'],reinfect = 'reinfect'):
for c in cset:
tmp = solveCovid(c)
tmp.reinfect = reinfect
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_scenarios(cset=['US']): # Save class objects under various scenarios so we could draw plots across countries/scenarios
p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}
for c in cset:
#Baseline
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
p_dict[c] = np.append(tmp.best_params, 1e9*tmp.phi)
tmp.sim_seir()
tmp.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_baseline.pkl'
pickle.dump(tmp,open(name,'wb'))
# Vaccines
t_vac = solveCovid(c)
t_vac.vac_assump = 'vac_worse'
t_vac.prelim()
t_vac.gamma_t_compute()
t_vac.fitmodel()
t_vac.sim_seir()
t_vac.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_vacworse.pkl'
pickle.dump(t_vac,open(name,'wb'))
# Spikes
t_spike = solveCovid(c)
t_spike.prelim()
t_spike.gamma_t_compute()
t_spike.fitmodel()
t_spike.gamma_tilde_model = 'shock'
t_spike.sim_seir()
t_spike.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_shock.pkl'
pickle.dump(t_spike,open(name,'wb'))
# Reinfection
t_reinfect = solveCovid(c)
t_reinfect.reinfect = 'reinfect'
t_reinfect.prelim()
t_reinfect.gamma_t_compute()
t_reinfect.fitmodel()
t_reinfect.sim_seir()
t_reinfect.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_reinfect.pkl'
pickle.dump(t_reinfect,open(name,'wb'))
# Better
t_better = solveCovid(c)
t_better.vac_assump = 'vac_better' # (a) 30% Faster vaccines
t_better.target_weight = 0.9 # (b) More targeted
t_better.prelim()
t_better.gamma_t_compute()
t_better.fitmodel()
t_better.sim_seir()
t_better.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_better.pkl'
pickle.dump(t_better,open(name,'wb'))
pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',float_format='%.4f',index=False)
def save_results(cset=['US']): # Unpack pickle and save all results into an excel
with pd.ExcelWriter(f'../output/{out_save_folder}/output_all.xlsx') as writer:
for c in cset:
print(f'Loading pickle for {c}')
tmp = pickle.load(open(f'../output/{out_load_folder}/{c}_baseline.pkl','rb'))
t_vac = pickle.load(open(f'../output/{out_load_folder}/{c}_vacworse.pkl','rb'))
t_spike = pickle.load(open(f'../output/{out_load_folder}/{c}_shock.pkl','rb'))
t_reinfect = pickle.load(open(f'../output/{out_load_folder}/{c}_reinfect.pkl','rb'))
#t_better = pickle.load(open(f'../output/{out_load_folder}/{c}_better.pkl','rb'))
tmp.df3.to_excel(writer, sheet_name=f'{c}_base')
t_vac.df3.to_excel(writer, sheet_name=f'{c}_vacworse')
t_spike.df3.to_excel(writer, sheet_name=f'{c}_shock')
t_reinfect.df3.to_excel(writer, sheet_name=f'{c}_reinfect')
#t_better.df3.to_excel(writer, sheet_name=f'{c}_better')
# ---------------------------------------------------
# x. Plotting functions
# ***** Utilities *****
def scatter1(x,y,xlab,ylab,df):
x1 = df[x]
y1 = df[y]
fig, ax = plt.subplots(figsize=(10,8))
ax.scatter(x1,y1,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x1.iloc[i], y1.iloc[i]), size=16)
ax.plot(np.unique(x1),
np.poly1d(np.polyfit(x1, y1, 1))(np.unique(x1)),
color='black')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
plt.xticks(fontsize= 20)
plt.yticks(fontsize= 20)
return fig, ax
def scatter2(x,y,x2,y2,xlab,ylab,df):
x1 = df[x]
y1 = df[y]
x2 = df[x2]
y2 = df[y2]
fig, ax = plt.subplots(figsize=(10,8))
ax.scatter(x1,y1,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x1.iloc[i], y1.iloc[i]), size=16, color='gray')
ax.plot(np.unique(x1),
np.poly1d(np.polyfit(x1, y1, 1))(np.unique(x1)),
color='gray')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
# Super impose with a new set
ax.scatter(x2,y2,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x2.iloc[i], y2.iloc[i]), size=16, color='blue')
ax.plot(np.unique(x2),
np.poly1d(np.polyfit(x2, y2, 1))(np.unique(x2)),
color='blue')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
plt.xticks(fontsize= 20)
plt.yticks(fontsize= 20)
return fig, ax
def all_output(cset=['US','DE']):
data_col = ['Mob 2021','Mob fc',
'GDP 2021','GDP fc',
'dDeath 2021','dDeath fc',
'dD/mn 2021','dD/mn fc',
'Mob 2021 3rdwave', 'Mob fc 3rdwave',
'GDP 2021 3rdwave', 'GDP fc 3rdwave',
'dDeath 2021 3rdwave', 'dDeath fc 3rdwave',
'dD/mn 2021 3rdwave', 'dD/mn fc 3rdwave',
'Mob 2021 vacworse', 'Mob fc vacworse',
'GDP 2021 vacworse', 'GDP fc vacworse',
'dDeath 2021 vacworse', 'dDeath fc vacworse',
'dD/mn 2021 vacworse', 'dD/mn fc vacworse',
'Mob 2021 reinfect', 'Mob fc reinfect',
'GDP 2021 reinfect', 'GDP fc reinfect',
'dDeath 2021 reinfect', 'dDeath fc reinfect',
'dD/mn 2021 reinfect', 'dD/mn fc reinfect',
# 'Mob 2021 better', 'Mob fc better',
# 'GDP 2021 better', 'GDP fc better',
# 'dDeath 2021 better', 'dDeath fc better',
# 'dD/mn 2021 better', 'dD/mn fc better',
]
data = {}
df_yratio = pd.read_csv(f'../output/growth-mob.csv', index_col=0)
for c in cset:
tmp = pickle.load(open(f'../output/{out_load_folder}/{c}_baseline.pkl','rb'))
tmp1 = pickle.load(open(f'../output/{out_load_folder}/{c}_shock.pkl','rb'))
tmp2 = pickle.load(open(f'../output/{out_load_folder}/{c}_vacworse.pkl','rb'))
tmp3 = pickle.load(open(f'../output/{out_load_folder}/{c}_reinfect.pkl','rb'))
# tmp4 = pickle.load(open(f'../output/{out_load_folder}/{c}_better.pkl','rb'))
cnum = tmp.df3.index.get_loc('2020-12-31')+1
d = tmp.df3['total_cases'].last_valid_index()
dnum = tmp.df3.index.get_loc(d)+1
mob_2021 = tmp.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc = tmp.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021 = 100*mob_2021*df_yratio.loc[c]['ym_ratio']
GDP_fc = 100*mob_fc*df_yratio.loc[c]['ym_ratio']
dD_2021 = tmp.df3['DD'][-1] - tmp.df3['DD'][cnum]
dD_fc = tmp.df3['DD'][-1] - tmp.df3['DD'][dnum]
dD_mn_2021 = 1000000*dD_2021/tmp.N
dD_mn_fc = 1000000*dD_fc/tmp.N
mob_2021_shock = tmp1.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_shock = tmp1.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_shock = 100*mob_2021_shock*df_yratio.loc[c]['ym_ratio']
GDP_fc_shock = 100*mob_fc_shock*df_yratio.loc[c]['ym_ratio']
dD_2021_shock = tmp1.df3['DD'][-1] - tmp1.df3['DD'][cnum]
dD_fc_shock = tmp1.df3['DD'][-1] - tmp1.df3['DD'][dnum]
dD_mn_2021_shock = 1000000*dD_2021_shock/tmp.N
dD_mn_fc_shock = 1000000*dD_fc_shock/tmp.N
mob_2021_vacworse = tmp2.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_vacworse = tmp2.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_vacworse = 100*mob_2021_vacworse*df_yratio.loc[c]['ym_ratio']
GDP_fc_vacworse = 100*mob_fc_vacworse*df_yratio.loc[c]['ym_ratio']
dD_2021_vacworse = tmp2.df3['DD'][-1] - tmp2.df3['DD'][cnum]
dD_fc_vacworse = tmp2.df3['DD'][-1] - tmp2.df3['DD'][dnum]
dD_mn_2021_vacworse = 1000000*dD_2021_vacworse/tmp.N
dD_mn_fc_vacworse = 1000000*dD_fc_vacworse/tmp.N
mob_2021_reinfect = tmp3.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_reinfect = tmp3.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_reinfect = 100*mob_2021_reinfect*df_yratio.loc[c]['ym_ratio']
GDP_fc_reinfect = 100*mob_fc_reinfect*df_yratio.loc[c]['ym_ratio']
dD_2021_reinfect = tmp3.df3['DD'][-1] - tmp3.df3['DD'][cnum]
dD_fc_reinfect = tmp3.df3['DD'][-1] - tmp3.df3['DD'][dnum]
dD_mn_2021_reinfect = 1000000*dD_2021_reinfect/tmp.N
dD_mn_fc_reinfect = 1000000*dD_fc_reinfect/tmp.N
# mob_2021_better = tmp4.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
# mob_fc_better = tmp4.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
# GDP_2021_better = 100*mob_2021_better*df_yratio.loc[c]['ym_ratio']
# GDP_fc_better = 100*mob_fc_better*df_yratio.loc[c]['ym_ratio']
# dD_2021_better = tmp4.df3['DD'][-1] - tmp4.df3['DD'][cnum]
# dD_fc_better = tmp4.df3['DD'][-1] - tmp4.df3['DD'][dnum]
# dD_mn_2021_better = 1000000*dD_2021_better/tmp.N
# dD_mn_fc_better = 1000000*dD_fc_better/tmp.N
data[c] = [mob_2021,mob_fc,
GDP_2021,GDP_fc,
dD_2021,dD_fc,
dD_mn_2021,dD_mn_fc,
mob_2021_shock, mob_fc_shock,
GDP_2021_shock, GDP_fc_shock,
dD_2021_shock, dD_fc_shock,
dD_mn_2021_shock, dD_mn_fc_shock,
mob_2021_vacworse, mob_fc_vacworse,
GDP_2021_vacworse, GDP_fc_vacworse,
dD_2021_vacworse, dD_fc_vacworse,
dD_mn_2021_vacworse, dD_mn_fc_vacworse,
mob_2021_reinfect, mob_fc_reinfect,
GDP_2021_reinfect, GDP_fc_reinfect,
dD_2021_reinfect, dD_fc_reinfect,
dD_mn_2021_reinfect, dD_mn_fc_reinfect,
# mob_2021_better, mob_fc_better,
# GDP_2021_better, GDP_fc_better,
# dD_2021_better, dD_fc_better,
# dD_mn_2021_better, dD_mn_fc_better,
]
    df_out = pd.DataFrame.from_dict(data, orient='index', columns=data_col)
from PhiRelevance.PhiUtils1 import phiControl,phi
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class RandomUnderSamplerRegression:
"""
Class RandomUnderSamplerRegression takes arguments as follows:
data - Pandas data frame with target value as last column; if read from .csv, recommend to use 'index_col=0'
method - "auto"("extremes") as default,"range"
extrType - "high", "both" as default, "low"
thr_rel - user defined relevance threadhold between 0 to 1, all the target values with relevance below
the threshold are candicates to be undersampled
controlPts - list of control points formatted as [y1, phi(y1), phi'(y1), y2, phi(y2), phi'(y2)], where
y1: target value; phi(y1): relevane value of y1; phi'(y1): derivative of phi(y1), etc.
c_perc - undersampling percentage should be applied in each bump with uninteresting values,
possible types are defined below,
"balance" - will try to distribute the examples evenly across the existing bumps
"extreme" - invert existing frequency of interesting/uninteresting set
<percentage> - A list of percentage values with either one value apply to all bumps of undersampling set
or multiple percentage values mapping to each bump of undersampling set
"""
def __init__(self, data, method='auto', extrType='both', thr_rel=1.0, controlPts=[], c_perc="balance"):
self.data = data;
self.method = 'extremes' if method in ['extremes', 'auto'] else 'range'
if self.method == 'extremes':
if extrType in ['high','low','both']:
self.extrType = extrType
else:
self.extrType = 'both'
else:
self.extrType =''
self.thr_rel = thr_rel
        if self.method == 'extremes':
            self.controlPts = []
        else:
            self.controlPts = controlPts
        if isinstance(c_perc, str):
            # fall back to "balance" when an unrecognized string is passed
            self.c_perc = c_perc if c_perc in ["balance", "extreme"] else "balance"
        elif isinstance(c_perc, list):
            self.c_perc = c_perc
self.coef = 1.5
def getMethod(self):
return self.method
def getData(self):
return self.data
def getExtrType(self):
return self.extrType
def getThrRel(self):
return self.thr_rel
def getControlPtr(self):
return self.controlPts
def getCPerc(self):
return self.c_perc
def resample(self):
yPhi, ydPhi, yddPhi = self.calc_rel_values()
data1 = self.preprocess_data(yPhi)
#interesting set
interesting_set = self.get_interesting_set(data1)
#uninteresting set
bumps_undersampling, bumps_interesting = self.calc_bumps(data1)
if self.c_perc == 'balance':
resampled = self.process_balance(bumps_undersampling, interesting_set)
elif self.c_perc == 'extreme':
resampled = self.process_extreme(bumps_undersampling, bumps_interesting, interesting_set)
elif isinstance(self.c_perc, list):
resampled = self.process_percentage(bumps_undersampling, interesting_set)
#clean up resampled set and return
self.postprocess_data(resampled)
return resampled
def postprocess_data(self, resampled):
self.data.drop('yPhi',axis=1,inplace=True )
resampled.drop('yPhi',axis=1,inplace=True )
resampled.sort_index(inplace=True)
return resampled
    def preprocess_data(self, yPhi):
        #append column 'yPhi'; the target is assumed to be the last column of the input frame
        target_col = self.data.columns[-1]
        data1 = self.data
        data1['yPhi'] = yPhi
        data1 = self.data.sort_values(by=target_col)
        return data1
def get_interesting_set(self, data):
interesting_set = data[data.yPhi >= self.thr_rel]
return interesting_set
    def get_undersampling_set(self, data):
        undersampling_set = data[data.yPhi < self.thr_rel]
        return undersampling_set
def calc_rel_values(self):
#retrieve target(last column) from DataFrame
y = self.data.iloc[:,-1]
#generate control ptrs
if self.method == 'extremes':
controlPts, npts = phiControl(y, extrType=self.extrType)
else:
controlPts, npts = phiControl(y, 'range', extrType="", controlPts=self.controlPts)
#calculate relevance value
yPhi, ydPhi, yddPhi = phi(y, controlPts, npts, self.method)
return yPhi, ydPhi, yddPhi
def process_balance(self, bumps_undersampling, interesting_set):
resample_size = round(len(interesting_set) / len(bumps_undersampling))
#print('process_balance(): resample_size per bump='+str(resample_size))
resampled_sets = []
for s in bumps_undersampling:
resampled_sets.append(s.sample(n=resample_size))
#includes interesting set
resampled_sets.append(interesting_set)
result = pd.concat(resampled_sets)
return result
def process_extreme(self, bumps_undersampling, bumps_interesting, interesting_set):
#print('process_extreme(): size of bumps_undersampling='+str(len(bumps_undersampling)))
#print('process_extreme(): size of bumps_interesting='+str(len(bumps_interesting)))
#print('process_extreme(): size of interesting_set='+str(len(interesting_set)))
resampled_sets = []
#calculate average cnt
len_interesting_set = len(interesting_set)
len_total = len(self.data)
#print('process_extreme(): size of total_set='+str(len_total))
average_cnt_interesting_set = len_interesting_set/len(bumps_interesting)
#print('process_extreme(): average_cnt_interesting_set='+str(average_cnt_interesting_set))
resample_size = (average_cnt_interesting_set**2.0)/(len_total-len_interesting_set)
#print('process_extreme(): resample_size='+str(resample_size))
resample_size_per_bump = round(resample_size / len(bumps_undersampling))
#print('process_extreme(): resample_size_per_bump='+str(resample_size_per_bump))
for s in bumps_undersampling:
resampled_sets.append(s.sample(n = resample_size_per_bump))
#includes interesting set
resampled_sets.append(interesting_set)
result = pd.concat(resampled_sets)
return result
def process_percentage(self, bumps_undersampling, interesting_set):
#make sure all percentage values are float values and <= 1.0
for c in self.c_perc:
if (not isinstance(c, float)) or (c>1.0):
print('c_perc must be list of float number <= 1.0')
return[]
#make sure c_perc values matches bumps
resampled_sets = []
if (len(bumps_undersampling) != len(self.c_perc)) and (len(self.c_perc) != 1):
print('c_perc value list must have either one value or values equal to number of bumps')
return []
elif len(self.c_perc) == 1:
undersample_perc = self.c_perc[0]
#print('len(self.c_perc) == 1')
#print('process_percentage(): undersample_perc='+str(undersample_perc))
for s in bumps_undersampling:
#print('process_percentage(): bump size='+str(len(s)))
resample_size = round(len(s)*undersample_perc)
#print('process_percentage(): resample_size='+str(resample_size))
resampled_sets.append(s.sample(n = resample_size))
#adding interesting set
resampled_sets.append(interesting_set)
result = pd.concat(resampled_sets)
else:
for i in range(len(bumps_undersampling)):
#print('len(self.c_perc) > 1 loop i='+str(i))
undersample_perc = self.c_perc[i]
#print('process_percentage(): undersample_perc='+str(undersample_perc))
resample_size = round(len(bumps_undersampling[i])*undersample_perc)
#print('process_percentage(): resample_size='+str(resample_size))
resampled_sets.append(bumps_undersampling[i].sample(n = resample_size))
#adding interesting set
resampled_sets.append(interesting_set)
            result = pd.concat(resampled_sets)
        return result
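# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the full class -- including calc_bumps() and the
# PhiRelevance helpers imported above -- is available, and it builds a small
# synthetic frame whose last column ('Tgt') is the numeric target, as the
# class docstring requires.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({'x1': rng.normal(size=200),
                         'x2': rng.normal(size=200),
                         'Tgt': rng.exponential(scale=10, size=200)})
    sampler = RandomUnderSamplerRegression(demo, method='auto', thr_rel=0.8,
                                            c_perc=[0.5])
    resampled = sampler.resample()
    print('rows before: %d, rows after: %d' % (len(demo), len(resampled)))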
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self, frame_or_series):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
obj = frame_or_series(ser)
# reg fillna
result = obj.fillna(Timestamp("20130104"))
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130104"),
Timestamp("20130103 9:01:01"),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = obj
tm.assert_equal(result, expected)
def test_fillna_dt64_non_nao(self):
# GH#27419
ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
val = np.datetime64("1975-04-05", "ms")
result = ser.fillna(val)
expected = Series(
[Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
)
tm.assert_series_equal(result, expected)
def test_fillna_numeric_inplace(self):
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
y = x.copy()
return_value = y.fillna(value=0, inplace=True)
assert return_value is None
expected = x.fillna(value=0)
tm.assert_series_equal(y, expected)
# ---------------------------------------------------------------
# CategoricalDtype
@pytest.mark.parametrize(
"fill_value, expected_output",
[
("a", ["a", "a", "b", "a", "a"]),
({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
(Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
(Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
(Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
(Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
],
)
def test_fillna_categorical(self, fill_value, expected_output):
# GH#17033
# Test fillna for a Categorical series
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
exp = Series(Categorical(expected_output, categories=["a", "b"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"fill_value, expected_output",
[
(Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
(Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
(
Series(
Categorical(
["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
)
),
["a", "d", "b", "d", "a"],
),
],
)
def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
# GH#26215
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
def test_fillna_categorical_raises(self):
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
cat = ser._values
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
ser.fillna("d")
msg2 = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg2):
cat.fillna(Series("d"))
with pytest.raises(TypeError, match=msg):
ser.fillna({1: "d", 3: "a"})
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna(["a", "b"])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna(("a", "b"))
msg = (
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))
@pytest.mark.parametrize("dtype", [float, "float32", "float64"])
@pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES)
def test_fillna_float_casting(self, dtype, fill_type):
# GH-43424
ser = Series([np.nan, 1.2], dtype=dtype)
fill_values = Series([2, 2], dtype=fill_type)
result = ser.fillna(fill_values)
expected = Series([2.0, 1.2], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_fillna_f32_upcast_with_dict(self):
# GH-43424
ser = Series([np.nan, 1.2], dtype=np.float32)
result = ser.fillna({0: 1})
expected = Series([1.0, 1.2], dtype=np.float32)
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------
# Invalid Usages
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method="ffil")
except ValueError as inst:
assert "ffil" in str(inst)
def test_fillna_listlike_invalid(self):
ser = Series(np.random.randint(-100, 100, 50))
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna([1, 2])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna((1, 2))
def test_fillna_method_and_limit_invalid(self):
# related GH#9217, make sure limit is an int and greater than 0
ser = Series([1, 2, 3, None])
msg = "|".join(
[
r"Cannot specify both 'value' and 'method'\.",
"Limit must be greater than 0",
"Limit must be an integer",
]
)
for limit in [-1, 0, 1.0, 2.0]:
for method in ["backfill", "bfill", "pad", "ffill", None]:
with pytest.raises(ValueError, match=msg):
ser.fillna(1, limit=limit, method=method)
def test_fillna_datetime64_with_timezone_tzinfo(self):
# https://github.com/pandas-dev/pandas/issues/38851
# different tzinfos representing UTC treated as equal
ser = Series(date_range("2020", periods=3, tz="UTC"))
expected = ser.copy()
ser[1] = NaT
result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc))
tm.assert_series_equal(result, expected)
# but we dont (yet) consider distinct tzinfos for non-UTC tz equivalent
ts = Timestamp("2000-01-01", tz="US/Pacific")
ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))
assert ser2.dtype.kind == "M"
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser2.fillna(ts)
expected = Series([ser[0], ts, ser[2]], dtype=object)
# TODO(2.0): once deprecation is enforced
# expected = Series(
# [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]],
# dtype=ser2.dtype,
# )
tm.assert_series_equal(result, expected)
def test_fillna_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
srs = Series([1, 2, 3, np.nan], dtype=float)
msg = (
r"In a future version of pandas all arguments of Series.fillna "
r"except for the argument 'value' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = srs.fillna(0, None, None)
expected = Series([1, 2, 3, 0], dtype=float)
tm.assert_series_equal(result, expected)
class TestFillnaPad:
def test_fillna_bug(self):
ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
filled = ser.fillna(method="ffill")
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)
tm.assert_series_equal(filled, expected)
filled = ser.fillna(method="bfill")
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)
tm.assert_series_equal(filled, expected)
def test_ffill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
def test_ffill_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
        ser = Series([1, 2, 3])
#!/usr/bin/env python
# coding: utf-8
# # Scenario
#
# As an analyst for OilyGiant mining company our task is to find the best place for a new well.
#
# We will use several techniques, including machine learning and bootstrapping, to select the region with the highest profit margin.
#
# Machine learning prediction question: What is the predicted volume of reserves in thousand barrels for each region?
#
# Target (response): product (volume of reserves in thousand barrels)
#
# Useful Features (predictor variables): f0, f1, f2 unknown features important to analysis
#
# Datasets: geo_data_0.csv, geo_data_1.csv, geo_data_2.csv
#
# Analysis done December 2021
# In[1]:
# import libraries
# sklearn used for basic machine learning
from sklearn.linear_model import LinearRegression
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from sklearn import metrics
import pandas as pd
import numpy as np
import math
from scipy import stats as st
from numpy.random import RandomState
state = RandomState(12345)
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# # Step 1: Download and prepare the data
# In[2]:
# load the data for region0
try:
region0 = pd.read_csv('/datasets/geo_data_0.csv')
except:
print('ERROR: Unable to find or access file.')
# load the data for region1
try:
region1 = pd.read_csv('/datasets/geo_data_1.csv')
except:
print('ERROR: Unable to find or access file.')
# load the data for region2
try:
region2 = pd.read_csv('/datasets/geo_data_2.csv')
except:
print('ERROR: Unable to find or access file.')
# In[3]:
# create basic loop to get info on dfs
# create list of dfs
dfs = [region0, region1, region2]
for df in dfs:
print('\n')
print("=" * 23)
name =[x for x in globals() if globals()[x] is df][0]
print("Dataframe Name: %s" % name)
print("=" * 23)
print('Number of duplicate rows:', df.duplicated().sum())
print('Number rows and columns:', df.shape, '\n')
print("Count total NaN at each column in a DataFrame :")
print(df.isnull().sum())
# # Observations on data
#
# Each of the three data frames include
# - **id**: unique oil well identifier
# - **f0, f1, f2** unknown features important to analysis
# - **product** volume of reserves in the oil well (thousand barrels)
#
# There are no NaN / missing values in any dataframes.
#
# There are no duplicate rows.
#
# Each df is the same size, 5 columns and 100000 rows.
# # Step 2: Train and test the model for each region
# - 2.1 Split the data into a training set and validation set at a ratio of 75/25.
# - 2.2 Train the model and make predictions for the validation set.
# - 2.3 Save the predictions and correct answers for the validation set.
# - 2.4 Print the average volume of predicted reserves and model RMSE.
# - 2.5 Analyze the results.
#
# We will create a method for the lr and calculations to minimize duplication of code.
#
# Then we will split each dataframe into train and valid and call the method.
# In[4]:
# examine correlations since we will use linear regression
for df in dfs:
print('\n')
print("=" * 25)
name = [x for x in globals() if globals()[x] is df][0]
print("Correlations for: %s" % name)
print("=" * 25)
correlations = df.corr()
print(correlations)
# We note a strong correlation between f2 and the target in region1, and moderate correlations between f2 and the target in region0 and region2. There is also a moderate negative correlation between f1 and the target in region0.
# In[5]:
def lr(df, X_train, X_valid, y_train, y_valid):
"""This method
instantiates a linear regression model,
fits/predicts using split data,
calculates the average volume of target reserves,
calculates the average volume of predicted reserves,
calculates model coefficients,
calculates model intercept,
    calculates R-squared,
    calculates mean squared error,
    calculates root mean squared error,
    calculates percent error.
    Input Arguments: df, X_train, X_valid, y_train, y_valid.
    Returns: y_pred, y_pred_avg, y_valid, y_valid_avg, r2, mse, rmse, pct_error.
"""
print('\n')
print("=" * 23)
name = [x for x in globals() if globals()[x] is df][0]
print("Dataframe Name: %s" % name)
print("=" * 23)
print('\nVerify the shape of X (features) and y (target) for', name)
print(X.shape, y.shape)
print('Verify size of divided df X_train, X_test, y_train, y_test\n'
, X_train.shape, X_valid.shape, y_train.shape, y_valid.shape)
# instantiate model
model = LinearRegression()
# train the model
model.fit(X_train, y_train)
# get predictions
y_pred = model.predict(X_valid)
# get avg prediction
y_pred_avg = y_pred.mean()
print('\nAverage volume of predicted reserves:', y_pred_avg)
y_valid_avg = y_valid.mean()
print('Average volume of target reserves:', y_valid.mean())
# Parameters calculated by the model
# coefficients indicate influence of parameters/features on pred volume
    print('\nCoefficients for columns', list(X_train.columns))
    coef0 = model.coef_
print(coef0)
# model intercept indicates the base value, or where the line would cross y
# without any influence of the parameters/features
print('Model intercept', model.intercept_)
# R-squared measures the fitness of the model
correlation_matrix = np.corrcoef(y_pred, y_valid)
correlation_xy = correlation_matrix[0,1]
r2 = correlation_xy**2
print('The R-squared value:', r2)
# calculate root mean square error to check error
mse = mean_squared_error(y_pred, y_valid)
rmse = mse ** 0.5
print('Mean squared error:', mse)
stddev = (model.predict(X_valid) - y_valid).std()
print('Standard deviation:', stddev)
print('Root mean square error:', rmse)
    pct_error = "{:.0%}".format(rmse / y_valid_avg)
print('Percent error of prediction from true values:', pct_error)
return y_pred, y_pred_avg, y_valid, y_valid_avg, r2, mse, rmse, pct_error
# In[6]:
# split 3 dfs into train and valid (75/25)
X = region0.iloc[:, 1:-1] # all rows and all cols except first and last
y = region0.iloc[:, -1].values# all rows and last column
X_train0, X_valid0, y_train0, y_valid0 = train_test_split(X, y, test_size=0.25, random_state=42)
X = region1.iloc[:, 1:-1] # all rows and all cols except first and last
y = region1.iloc[:, -1].values # all rows and last column
X_train1, X_valid1, y_train1, y_valid1 = train_test_split(X, y, test_size=0.25, random_state=42)
X = region2.iloc[:, 1:-1] # all rows and all cols except first and last
y = region2.iloc[:, -1].values # all rows and last column
X_train2, X_valid2, y_train2, y_valid2 = train_test_split(X, y, test_size=0.25, random_state=42)
# In[7]:
# call lr method on 3 dfs
y_pred0, y_pred_avg0, y_valid0, y_valid_avg0, r20, mse0, rmse0, pct_e0 = lr(region0, X_train0, X_valid0, y_train0, y_valid0)
y_pred1, y_pred_avg1, y_valid1, y_valid_avg1, r21, mse1, rmse1, pct_e1 = lr(region1, X_train1, X_valid1, y_train1, y_valid1)
y_pred2, y_pred_avg2, y_valid2, y_valid_avg2, r22, mse2, rmse2, pct_e2 = lr(region2, X_train2, X_valid2, y_train2, y_valid2)
# In[8]:
plt.hist(region0['product'], alpha=.2, bins=50);
plt.hist(region1['product'],alpha=.2,bins=50);
plt.hist(region2['product'],alpha=.2,bins=50);
plt.legend(['region0', 'region1', 'region2'])
plt.title("Distribution of volumes divided into 50 bins")
plt.ylabel("Number of volume values (rows) in bins")
plt.xlabel("Volume in thousand barrels")
plt.show()
# In[9]:
# put values in table
compare = pd.DataFrame({'Volume': ['Avg Predicted (thousands of barrels)', 'Avg Target (thousands of barrels)'
, 'R-Squared', 'RMSE', 'Percent error from true values'],
'region0': [y_pred_avg0, y_valid_avg0, r20, rmse0, pct_e0],
'region1': [y_pred_avg1, y_valid_avg1, r21, rmse1, pct_e1],
'region2': [y_pred_avg2, y_valid_avg2, r22, rmse2, pct_e2] })
compare.set_index('Volume', inplace=True)
compare
# We note the best linear regression results with region1: its R^2 value is very close to 1 (with 1 being ideal) and its RMSE is low as a percentage of the average volume in thousand barrels. However, its average predicted and true volumes are substantially lower than those of region0 and region2.
#
# While region0 and region2 enjoy higher average predicted and true volumes in thousand barrels, both perform poorly with linear regression, having a low R^2, a high RMSE, and a percent error over 40%.
#
# It is interesting to note that when we divide the regions into 50 bins and plot counts of volume by volume, region1 clusters into 6 main groups while region0 and region2 show a more normal distribution. This could be an area for future investigation, to see if those volume clusters correlate with a specific characteristic in the region. If we knew that data, we could recommend, with very high probability, that OilyGiant select new wells in region1 with the characteristics associated with the 2 high-volume clusters.
# # Step 3: Prepare for profit calculation
# - 3.1 Store all key values for calculations in separate variables.
# - 3.2 Calculate the volume of reserves sufficient for developing a new well without losses. Compare the obtained value with the average volume of reserves in each region.
# - 3.3 Provide the findings about the preparation for profit calculation step.
# In[10]:
# store values, calculate volume of reserves to avoid losses
n = 500 # oilwells
n_best = 200 # oilwells
revenue_per_barrel = 4.5 # USD
revenue_per_unit = 4500 # USD, unit is 1000 barrels
budget_200_oil_wells = 100_000_000.00 # dollars # 100 USD million
n_budget = 200 # oilwells in budget total
unit = 1000 # barrels
max_loss = 0.025
min_volume_of_reserves = budget_200_oil_wells/n_budget/revenue_per_unit
print('The minimum volume of reserves sufficient for developing'
+'\na new well without losses:', round(min_volume_of_reserves,2), 'in thousands of barrels')
print('\nThe average predicted volumes of reserves in all 3 '
      '\nregions are lower', (round(y_pred_avg0,2), round(y_pred_avg1,2), round(y_pred_avg2,2)),
      'but 2 regions come close to this minimum.')
# # Step 4: Write a function to calculate profit from a set of selected oil wells and model predictions
# - 4.1 Pick the wells with the highest values of predictions. The number of wells depends on the budget and cost of developing one oil well.
# - 4.2 Summarize the target volume of reserves in accordance with these predictions
# - 4.3 Provide findings: suggest a region for oil wells' development and justify the choice. Calculate the profit for the obtained volume of reserves.
#
# In[11]:
# sample the predicted wells with the highest volumes
# calculate the sum of the corresponding actual volumes
# calculate the profit:
# (volume of wells in thousand barrels * revenue) - budget
def revenue(target, predicted, count):
indices = predicted.sort_values(ascending=False).index
return target[indices][:count].sum() * revenue_per_unit - budget_200_oil_wells
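# In[ ]:

# Illustrative check (added sketch, not from the original notebook): profit of
# the n_best=200 best predicted wells in each region's validation split, using
# the revenue() helper above. Wrapping the arrays as Series with a shared
# default index is an assumption made here so the label lookup inside
# revenue() lines up.
for name, tgt, prd in [('region0', y_valid0, y_pred0),
                       ('region1', y_valid1, y_pred1),
                       ('region2', y_valid2, y_pred2)]:
    profit = revenue(pd.Series(tgt), pd.Series(prd), n_best)
    print('%s: profit of %d best predicted wells = $%.2f' % (name, n_best, profit))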
# # Step 5: Calculate risks and profit for each region
# - 5.1 Use the bootstrap technique with 1000 samples to find the distribution of profit.
# - 5.2 Find average profit, 95% confidence interval and risk of losses. Loss is negative profit.
# - 5.3 Provide findings: suggest a region for development of oil wells and justify the choice.
# In[12]:
# bootstrap method
def revenue_bs(target, predicted):
values = []
target = pd.Series(target)
    predicted = pd.Series(predicted)
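    # --- Hedged completion sketch: the original cell is cut off here. ---
    # Step 5 above states the intent: 1000 bootstrap samples, average profit,
    # a 95% confidence interval and the risk of loss (negative profit). The
    # sampling details below (500 wells per sample, best 200 kept) follow the
    # study conditions stored earlier and are assumptions, not original code.
    for i in range(1000):
        target_subsample = target.sample(n=n, replace=True, random_state=state)
        pred_subsample = predicted[target_subsample.index]
        values.append(revenue(target_subsample, pred_subsample, n_best))
    values = pd.Series(values)
    mean_profit = values.mean()
    lower = values.quantile(0.025)
    upper = values.quantile(0.975)
    risk_of_loss = (values < 0).mean()
    return mean_profit, lower, upper, risk_of_loss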
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see gh-7271
s = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = s.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = s.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
s.astype(dt4)
# GH16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
def test_astype_categories_raises(self):
# deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
def test_astype_from_categorical(self):
items = ["a", "b", "c", "a"]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
items = [1, 2, 3, 1]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = DataFrame(
{"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]}
)
cats = Categorical(["a", "b", "b", "a", "a", "d"])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
lst = ["a", "b", "c", "a"]
s = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = s.astype(CategoricalDtype(None, ordered=True))
        tm.assert_series_equal(res, exp)
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
################################################################################
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy import inf
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pylab as plt
try:
    get_ipython().magic(u'matplotlib inline')   ### only works when running inside IPython / Jupyter
except (NameError, AttributeError):
    pass    ### plain Python interpreter: no inline magic available, continue without it
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from sklearn.metrics import classification_report, confusion_matrix
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from autoviml.QuickML_Stacking import QuickML_Stacking
from autoviml.Transform_KM_Features import Transform_KM_Features
from autoviml.QuickML_Ensembling import QuickML_Ensembling
from autoviml.Auto_NLP import Auto_NLP, select_top_features_from_SVD
import xgboost as xgb
import sys
#### Imports needed by the helper functions below in this module ####
import copy
import time
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict, defaultdict
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.cluster import KMeans
##################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
for cls in counts.keys():
print("%6s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
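#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_find_rare_class():
    """Toy example: 'yes' is the minority class, so find_rare_class returns it."""
    labels = ['no', 'no', 'no', 'yes', 'no', 'yes']
    rare_label = find_rare_class(labels, verbose=1)   ### prints the class counts and percentages
    return rare_label                                 ### -> 'yes'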
###############################################################################
def return_factorized_dict(ls):
"""
###### Factorize any list of values in a data frame using this neat function
if your data has any NaN's it automatically marks it as -1 and returns that for NaN's
Returns a dictionary mapping previous values with new values.
"""
factos = pd.unique(pd.factorize(ls)[0])
categs = pd.unique(pd.factorize(ls)[1])
if -1 in factos:
categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)
return dict(zip(categs,factos))
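#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_return_factorized_dict():
    """Toy example: categories map to 0,1,2,... in order of first appearance; NaN maps to -1."""
    mapping = return_factorized_dict(['b', 'a', np.nan, 'b', 'c'])
    ### mapping is {'b': 0, 'a': 1, nan: -1, 'c': 2}
    return mapping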
#############################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
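#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_balanced_accuracy_score():
    """Toy example: recall is 1.0 for class 0 and 0.5 for class 1, so the balanced accuracy is 0.75."""
    y_true = [0, 0, 1, 1]
    y_pred = [0, 0, 1, 0]
    return balanced_accuracy_score(y_true, y_pred)    ### -> 0.75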
#############################################################################################
import os
def check_if_GPU_exists():
GPU_exists = False
try:
from tensorflow.python.client import device_lib
dev_list = device_lib.list_local_devices()
print('Number of GPUs = %d' %len(dev_list))
for i in range(len(dev_list)):
if 'GPU' == dev_list[i].device_type:
GPU_exists = True
print('%s available' %dev_list[i].device_type)
except:
print('')
if not GPU_exists:
try:
os.environ['NVIDIA_VISIBLE_DEVICES']
print('GPU available on this device')
return True
except:
print('No GPU available on this device')
return False
else:
return True
#############################################################################################
def analyze_problem_type(train, targ,verbose=0):
"""
    This function analyzes a Target Variable and finds out whether it is a
Regression or Classification type problem
"""
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
else:
            if len(train[targ].unique()) == 2:
                model_class = 'Binary_Classification'
            else:
                ### treat any other non-numeric target as multi-class so that model_class is always defined
                model_class = 'Multi_Classification'
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 1:
print('Error in data set: Only one class in Target variable. Check input and try again')
sys.exit()
elif len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else :
model_class = 'Regression'
return model_class
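#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_analyze_problem_type():
    """Toy examples: a 0/1 target is Binary_Classification; a float target with 30+ unique values is Regression."""
    df = pd.DataFrame({'binary_target': [0, 1] * 20,
                       'float_target': np.linspace(0.0, 1.0, 40)})
    binary_kind = analyze_problem_type(df, 'binary_target')      ### -> 'Binary_Classification'
    regression_kind = analyze_problem_type(df, 'float_target')   ### -> 'Regression'
    return binary_kind, regression_kind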
#######
def convert_train_test_cat_col_to_numeric(start_train, start_test, col,str_flag=True):
"""
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
"""
start_train = copy.deepcopy(start_train)
start_test = copy.deepcopy(start_test)
missing_flag = False
new_missing_col = ''
if start_train[col].isnull().sum() > 0:
missing_flag = True
if str_flag:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype(str)
else:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype('category')
if len(start_train[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Train data set %s column with %d data types. Fixing it...' %(
col, len(start_train[col].apply(type).value_counts())))
train_categs = start_train[col].value_counts().index.tolist()
else:
train_categs = np.unique(start_train[col]).tolist()
if not isinstance(start_test,str) :
if start_test[col].isnull().sum() > 0:
            #### In some rare cases, Test data has missing values while Train data doesn't
#### This section is take care of those rare cases. We need to create a missing col
#### We need to create that missing flag column in both train and test in that case
if not missing_flag:
missing_flag = True
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
##### THis is to take care of Missing_Flag in start_test data set!!
start_test[new_missing_col] = 0
start_test.loc[start_test[col].isnull(),new_missing_col]=1
if str_flag:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype(str)
else:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype('category')
        else:
            #### In some rare cases, there are missing values in train but not in test data!
            #### In that case, the missing-flag column created for train must also exist in test (all zeros)
            if missing_flag:
                start_test[new_missing_col] = 0
if len(start_test[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Test data set %s column with %d data types. Fixing it...' %(
col, len(start_test[col].apply(type).value_counts())))
test_categs = start_test[col].value_counts().index.tolist()
test_categs = [x if isinstance(x,str) else str(x) for x in test_categs]
start_test[col] = start_test[col].astype(str).values
else:
test_categs = np.unique(start_test[col]).tolist()
if not isinstance(start_test,str) :
categs_all = np.unique( train_categs + test_categs).tolist()
dict_all = return_factorized_dict(categs_all)
else:
dict_all = return_factorized_dict(train_categs)
start_train[col] = start_train[col].map(dict_all)
if not isinstance(start_test,str) :
start_test[col] = start_test[col].map(dict_all)
return start_train, start_test, missing_flag, new_missing_col
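#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_convert_train_test_cat_col_to_numeric():
    """Toy example: train and test share one label mapping; a NaN in train adds a *_Missing_Flag column."""
    train_df = pd.DataFrame({'city': ['NY', 'LA', np.nan, 'NY']})
    test_df = pd.DataFrame({'city': ['SF', 'LA']})
    train_df, test_df, had_missing, flag_col = convert_train_test_cat_col_to_numeric(
                                                train_df, test_df, 'city', str_flag=True)
    ### 'city' is now numeric in both frames; had_missing is True and flag_col == 'city_Missing_Flag'
    return train_df, test_df, had_missing, flag_col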
#############################################################################################################
def flatten_list(list_of_lists):
final_ls = []
for each_item in list_of_lists:
if isinstance(each_item,list):
final_ls += each_item
else:
final_ls.append(each_item)
return final_ls
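#### --- Illustrative sketch (added example, not part of the original library API) -------------------
def _demo_flatten_list():
    """Toy example: one level of nesting is flattened -> [1, 2, 3, 4]."""
    return flatten_list([[1, 2], 3, [4]])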
#############################################################################################################
import scipy as sp
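#### --- Illustrative usage sketch (added example; the file name and target column below are hypothetical) ---
def _demo_auto_viml_call():
    """Minimal sketch of a typical Auto_ViML call, assuming 'train.csv' contains a column named 'target'."""
    train_df = pd.read_csv('train.csv')               ### hypothetical input file
    model, features, train_mod, test_mod = Auto_ViML(
                        train_df, 'target', test='',  ### no separate test set in this sketch
                        hyper_param='RS', feature_reduction=True,
                        scoring_parameter='balanced_accuracy',
                        Boosting_Flag=True, verbose=1)
    return model, features, train_mod, test_mod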
def Auto_ViML(train, target, test='',sample_submission='',hyper_param='RS', feature_reduction=True,
scoring_parameter='logloss', Boosting_Flag=None, KMeans_Featurizer=False,
Add_Poly=0, Stacking_Flag=False, Binning_Flag=False,
Imbalanced_Flag=False, verbose=0):
"""
#########################################################################################################
############# This is not an Officially Supported Google Product! #########################
#########################################################################################################
#### Automatically Build Variant Interpretable Machine Learning Models (Auto_ViML) ######
#### Developed by <NAME> ######
###### Version 0.1.652 #######
##### GPU UPGRADE!! Now with Auto_NLP. Best Version to Download or Upgrade. May 15,2020 ######
###### Auto_VIMAL with Auto_NLP combines structured data with NLP for Predictions. #######
#########################################################################################################
#Copyright 2019 Google LLC #######
# #######
#Licensed under the Apache License, Version 2.0 (the "License"); #######
#you may not use this file except in compliance with the License. #######
#You may obtain a copy of the License at #######
# #######
# https://www.apache.org/licenses/LICENSE-2.0 #######
# #######
#Unless required by applicable law or agreed to in writing, software #######
#distributed under the License is distributed on an "AS IS" BASIS, #######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #######
#See the License for the specific language governing permissions and #######
#limitations under the License. #######
#########################################################################################################
#### Auto_ViML was designed for building a High Performance Interpretable Model With Fewest Vars. ###
#### The "V" in Auto_ViML stands for Variant because it tries Multiple Models and Multiple Features ###
#### to find the Best Performing Model for any data set.The "i" in Auto_ViML stands " Interpretable"###
#### since it selects the fewest Features to build a simpler, more interpretable model. This is key. ##
#### Auto_ViML is built mostly using Scikit-Learn, Numpy, Pandas and Matplotlib. Hence it should run ##
#### on any Python 2 or Python 3 Anaconda installations. You won't have to import any special ####
#### Libraries other than "SHAP" library for SHAP values which provides more interpretability. #####
#### But if you don't have it, Auto_ViML will skip it and show you the regular feature importances. ###
#########################################################################################################
#### INPUTS: ###
#########################################################################################################
#### train: could be a datapath+filename or a dataframe. It will detect which is which and load it.####
#### test: could be a datapath+filename or a dataframe. If you don't have any, just leave it as "". ###
#### submission: must be a datapath+filename. If you don't have any, just leave it as empty string.####
#### target: name of the target variable in the data set. ####
    ####  sep: if you have a separator in the file such as "," or "\t" mention it here. Default is ",". ####
#### scoring_parameter: if you want your own scoring parameter such as "f1" give it here. If not, #####
#### it will assume the appropriate scoring param for the problem and it will build the model.#####
    ####  hyper_param: Tuning options are GridSearch ('GS'), RandomizedSearch ('RS') and now HyperOpt ('HO')#
    ####    Default setting is 'RS'. Auto_ViML with HyperOpt is approximately 3X faster than with GridSearchCV.#
#### feature_reduction: Default = 'True' but it can be set to False if you don't want automatic ####
#### feature_reduction since in Image data sets like digits and MNIST, you get better #####
#### results when you don't reduce features automatically. You can always try both and see. #####
#### KMeans_Featurizer = True: Adds a cluster label to features based on KMeans. Use for Linear. #####
#### False (default) = For Random Forests or XGB models, leave it False since it may overfit.####
#### Boosting Flag: you have 3 possible choices (default is False): #####
#### None = This will build a Linear Model #####
#### False = This will build a Random Forest or Extra Trees model (also known as Bagging) #####
#### True = This will build an XGBoost model #####
    ####  Add_Poly: Default is 0 (do nothing). It has 3 additional settings:                            #####
    ####    1 = Add interaction variables only such as x1*x2, x2*x3,...x9*x10 etc.                      #####
    ####    2 = Add squared variables only such as x1**2, x2**2, etc.                                   #####
    ####    3 = Add both interaction and squared variables.                                             #####
#### Stacking_Flag: Default is False. If set to True, it will add an additional feature which #####
#### is derived from predictions of another model. This is used in some cases but may result#####
#### in overfitting. So be careful turning this flag "on". #####
    ####  Binning_Flag: Default is False. If set to True, it will convert the top numeric variables     #####
#### into binned variables through a technique known as "Entropy" binning. This is very #####
#### helpful for certain datasets (especially hard to build models). #####
#### Imbalanced_Flag: Default is False. If set to True, it will downsample the "Majority Class" #####
    ####    in an imbalanced dataset and make the "Rare" class at least 5% of the data set. This is     #####
#### the ideal threshold in my mind to make a model learn. Do it for Highly Imbalanced data.#####
#### verbose: This has 3 possible states: #####
#### 0 = limited output. Great for running this silently and getting fast results. #####
#### 1 = more charts. Great for knowing how results were and making changes to flags in input. #####
#### 2 = lots of charts and output. Great for reproducing what Auto_ViML does on your own. #####
#########################################################################################################
#### OUTPUTS: #####
#########################################################################################################
#### model: It will return your trained model #####
#### features: the fewest number of features in your model to make it perform well #####
#### train_modified: this is the modified train dataframe after removing and adding features #####
#### test_modified: this is the modified test dataframe with the same transformations as train #####
################# A D D I T I O N A L N O T E S ###########
#### Finally, it writes your submission file to disk in the current directory called "mysubmission.csv"
#### This submission file is ready for you to show it clients or submit it to competitions. #####
#### If no submission file was given but as long as you give it a test file name, it will create #####
#### a submission file for you named "mySubmission.csv". #####
#### Auto_ViML works on any Multi-Class, Multi-Label Data Set. So you can have many target labels #####
#### You don't have to tell Auto_ViML whether it is a Regression or Classification problem. #####
#### Suggestions for a Scoring Metric: #####
#### If you have Binary Class and Multi-Class in a Single Label, Choose Accuracy. It will ######
#### do very well. If you want something better, try roc_auc even for Multi-Class which works. ######
#### You can try F1 or Weighted F1 if you want something complex or for Multi-Class. ######
#### Note that For Imbalanced Classes (<=5% classes), it automatically adds Class Weights. ######
#### Also, Note that it handles Multi-Label automatically so you can send Train data ######
#### with multiple Labels (Targets) and it will automatically predict for each Label. ######
#### Finally this is Meant to Be a Fast Algorithm, so use it for just quick POCs ######
#### This is Not Meant for Production Problems. It produces great models but it is not Perfect! ######
######################### HELP OTHERS! PLEASE CONTRIBUTE! OPEN A PULL REQUEST! ##########################
#########################################################################################################
"""
##### These copies are to make sure that the originals are not destroyed ####
CPU_count = os.cpu_count()
test = copy.deepcopy(test)
orig_train = copy.deepcopy(train)
orig_test = copy.deepcopy(test)
train_index = train.index
if not isinstance(test, str):
test_index = test.index
start_test = copy.deepcopy(orig_test)
####### These are Global Settings. If you change them here, it will ripple across the whole code ###
corr_limit = 0.70 #### This decides what the cut-off for defining highly correlated vars to remove is.
scaling = 'MinMax' ### This decides whether to use MinMax scaling or Standard Scaling ("Std").
    first_flag = 0   ## This is just a setting to detect which is the first pass through this code
seed= 99 ### this maintains repeatability of the whole ML pipeline here ###
subsample=0.7 #### Leave this low so the models generalize better. Increase it if you want overfit models
col_sub_sample = 0.7 ### Leave this low for the same reason above
poly_degree = 2 ### this create 2-degree polynomial variables in Add_Poly. Increase if you want more degrees
booster = 'gbtree' ### this is the booster for XGBoost. The other option is "Linear".
n_splits = 5 ### This controls the number of splits for Cross Validation. Increasing will take longer time.
matplotlib_flag = True #(default) This is for drawing SHAP values. If this is False, initJS is used.
early_stopping = 20 #### Early stopping rounds for XGBoost ######
encoded = '_Label_Encoded' ### This is the tag we add to feature names in the end to indicate they are label encoded
    catboost_limit = 0.4 #### The catboost_limit represents the percentage of num vars in data. Any lower, CatBoost is used.
cat_code_limit = 100 #### If the number of dummy variables to create in a data set exceeds this, CatBoost is the default Algorithm used
    one_hot_size = 500 #### This determines the max length of one_hot_max_size parameter of CatBoost algorithm
Alpha_min = -3 #### The lowest value of Alpha in LOGSPACE that is used in CatBoost
Alpha_max = 2 #### The highest value of Alpha in LOGSPACE that is used in Lasso or Ridge Regression
Cs = [0.001,0.005,0.01,0.05,0.1,0.25,0.5,1,2,4,6,10,20,30,40,50,100,150,200,400,800,1000,2000]
#Cs = np.logspace(-4,3,40) ### The list of values of C used in Logistic Regression
tolerance = 0.001 #### This tolerance is needed to speed up Logistic Regression. Otherwise, SAGA takes too long!!
    #### 'lbfgs' is the fastest one but doesn't provide accurate results. Newton-CG is slower but accurate!
#### SAGA is extremely slow. Even slower than Newton-CG. Liblinear is the fastest and as accurate as Newton-CG!
solvers = ['liblinear'] ### Other solvers for Logistic Regression model: ['newton-cg','lbfgs','saga','liblinear']
    solver = 'liblinear' ### liblinear is used as the default solver here since it is fast and accurate. Useful for Multi-class problems!
penalties = ['l2','l1'] ### This is to determine the penalties for LogisticRegression
n_steps = 6 ### number of estimator steps between 100 and max_estims
max_depth = 10 ##### This limits the max_depth used in decision trees and other classifiers
max_features = 10 #### maximum number of features in a random forest model or extra trees model
warm_start = True ### This is to set the warm_start flag for the ExtraTrees models
bootstrap = True #### Set this flag to control whether to bootstrap variables or not.
n_repeats = 1 #### This is for repeated KFold and StratifiedKFold - this changes the folds every time
Bins = 30 ### This is for plotting probabilities in a histogram. For small data sets, 30 is enough.
top_nlp_features = 100 ### This sets a limit on the number of features added by each NLP transformer!
removed_features_threshold = 5 #### This triggers the Truncated_SVD if number of removed features from XGB exceeds this!
calibrator_flag = False ### In Multi-class data sets, a CalibratedClassifier works better than regular classifiers!
max_class_length = 1 ### It turns out the number of classes is directly correlated to Estimated Time. Hence this!
print('############## D A T A S E T A N A L Y S I S #######################')
########## I F CATBOOST IS REQUESTED, THEN CHECK IF IT IS INSTALLED #######################
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
from catboost import CatBoostClassifier, CatBoostRegressor
#### Similarly for Random Forests Model, it takes too long with Grid Search, so MAKE IT RandomizedSearch!
if not Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise, Random Forests will take too long for 10,000+ rows')
elif Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if not isinstance(Boosting_Flag, str):
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise XGBoost will take too long for 10,000+ rows.')
########### T H I S I S W H E R E H Y P E R O P T P A R A M S A R E S E T #########
if hyper_param == 'HO':
########### HyperOpt related objective functions are defined here #################
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import Trials
from autoviml.custom_scores_HO import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores_HO import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores_HO import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores_HO import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores_HO import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores_HO import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores_HO import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores_HO import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores_HO import gini_samples_recall, gini_macro_recall, gini_micro_recall
else:
from autoviml.custom_scores import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores import gini_samples_recall, gini_macro_recall, gini_micro_recall
###### If hyper_param = 'GS', it takes a LOOOONG TIME with "SAGA" solver for LogisticRegression.
#### Hence to speed it up you need to change the tolerance threshold to something bigger
if hyper_param == 'GS':
tolerance = 0.01 #### This tolerance is bigger to speed up Logistic Regression. Otherwise, SAGA takes too long!!
########## This is where some more default parameters are set up ######
data_dimension = orig_train.shape[0]*orig_train.shape[1] ### number of cells in the entire data set .
if data_dimension > 1000000:
### if data dimension exceeds 1 million, then reduce no of params
no_iter=30
early_stopping = 10
test_size = 0.20
max_iter = 10000
Bins = 100
top_nlp_features = 300
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 5000
else:
max_estims = 400
else:
max_estims = 400
else:
if orig_train.shape[0] <= 1000:
no_iter=20
test_size = 0.1
max_iter = 4000
top_nlp_features = 250
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 3000
else:
max_estims = 300
else:
max_estims = 300
early_stopping = 4
else:
no_iter=30
test_size = 0.15
max_iter = 7000
top_nlp_features = 200
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 4000
else:
max_estims = 350
else:
max_estims = 350
early_stopping = 6
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
### First_Flag is merely a flag for the first time you want to set values of variables
if scaling == 'MinMax':
SS = MinMaxScaler()
elif scaling == 'Std':
SS = StandardScaler()
else:
SS = MinMaxScaler()
### Make target into a list so that we can uniformly process the target label
if not isinstance(target, list):
target = [target]
model_label = 'Single_Label'
elif isinstance(target, list):
if len(target)==1:
model_label = 'Single_Label'
elif len(target) > 1:
model_label = 'Multi_Label'
else:
print('Target variable is neither a string nor a list. Please check input and try again!')
return
##### This is where we run the Traditional models to compare them to XGB #####
start_time = time.time()
####################################################################################
##### Set up your Target Labels and Classes Properly Here #### Label Encoding #####
#### This is for Classification Problems Only where you do Label Encoding of Target
mldict = lambda: defaultdict(mldict)
label_dict = mldict()
first_time = True
print('Training Set Shape = {}'.format(orig_train.shape))
print(' Training Set Memory Usage = {:.2f} MB'.format(orig_train.memory_usage().sum() / 1024**2))
if not isinstance(orig_test,str):
print('Test Set Shape = {}'.format(orig_test.shape))
print(' Test Set Memory Usage = {:.2f} MB'.format(orig_test.memory_usage().sum() / 1024**2))
print('%s Target: %s' %(model_label,target))
###### Now analyze what problem we have here ####
try:
modeltype = analyze_problem_type(train, target[0],verbose)
except:
print('Cannot find the Target variable in data set. Please check input and try again')
return
for each_target in target:
#### Make sure you don't move these 2 lines: they need to be reset for every target!
#### HyperOpt will not do Trials beyond max_evals - so only if you reset here, it will do it again.
if hyper_param == 'HO':
params_dict = {}
bayes_trials = Trials()
############ THIS IS WHERE OTHER DEFAULT PARAMS ARE SET ###############
c_params = dict()
r_params = dict()
if modeltype == 'Regression':
scv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
eval_metric = 'rmse'
objective = 'reg:squarederror'
model_class = 'Regression'
start_train = copy.deepcopy(orig_train)
else:
if len(np.unique(train[each_target])) == 2:
model_class = 'Binary-Class'
elif len(np.unique(train[each_target])) > 2:
model_class = 'Multi-Class'
##### If multi-class happens, then you absolutely need to do SMOTE. Otherwise, you don't get good results!
#### Unfortunately SMOTE blows up when the data set is large -> so better to turn it off!
print('ALERT! Setting Imbalanced_Flag to True in Auto_ViML for Multi_Classification problems improves results!')
#Imbalanced_Flag = True
else:
print('Target label %s has less than 2 classes. Stopping' %each_target)
return
### This is for Classification Problems Only ########
print('Shuffling the data set before training')
start_train = orig_train.sample(frac=1.0, random_state=seed)
scv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
if modeltype != 'Regression':
rare_class_orig = find_rare_class(orig_train[each_target].values,verbose=1)
            ### Perform Label Transformation only for Classification Problems ####
classes = np.unique(orig_train[each_target])
if first_time:
if hyper_param == 'GS':
print('Using GridSearchCV for Hyper Parameter Tuning. This is slow. Switch to RS for faster tuning...')
elif hyper_param == 'RS':
print('Using RandomizedSearchCV for Hyper Parameter Tuning. This is 3X faster than GridSearchCV...')
else:
print('Using HyperOpt which is approximately 3X Faster than GridSearchCV but results vary...')
first_time = False
if len(classes) > 2:
##### If Boosting_Flag = True, change it to False here since Multi-Class XGB is VERY SLOW!
max_class_length = len(classes)
if Boosting_Flag:
print('CAUTION: In Multi-Class Boosting (2+ classes), TRAINING WILL TAKE A LOT OF TIME!')
objective = 'multi:softmax'
eval_metric = "mlogloss"
else:
max_class_length = 2
eval_metric="logloss"
objective = 'binary:logistic'
### Do Label Encoding when the Target Classes in each Label are Strings or Multi Class ###
if type(start_train[each_target].values[0])==str or str(start_train[each_target].dtype
)=='category' or sorted(np.unique(start_train[each_target].values))[0] != 0:
### if the class is a string or if it has more than 2 classes, then use Factorizer!
label_dict[each_target]['values'] = start_train[each_target].values
#### Factorizer is the easiest way to convert target in train and predictions in test
#### This takes care of some classes that are present in train and not in predictions
### and vice versa. Hence it is better than Label Encoders which breaks when above happens.
train_targ_categs = list(start_train[each_target].value_counts().index)
if len(train_targ_categs) == 2:
majority_class = [x for x in train_targ_categs if x != rare_class_orig]
dict_targ_all = {majority_class[0]: 0, rare_class_orig: 1}
else:
dict_targ_all = return_factorized_dict(train_targ_categs)
start_train[each_target] = start_train[each_target].map(dict_targ_all)
label_dict[each_target]['dictionary'] = copy.deepcopy(dict_targ_all)
label_dict[each_target]['transformer'] = dict([(v,k) for (k,v) in dict_targ_all.items()])
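                #### 'transformer' is the inverse mapping (numeric code -> original label), presumably used to decode predictions back to the original class names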
label_dict[each_target]['classes'] = copy.deepcopy(train_targ_categs)
class_nums = list(dict_targ_all.values())
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
print('String or Multi Class target: %s transformed as follows: %s' %(each_target,dict_targ_all))
rare_class = find_rare_class(start_train[each_target].values)
else:
### Since the each_target here is already numeric, you don't have to modify it
start_train[each_target] = start_train[each_target].astype(int).values
rare_class = find_rare_class(start_train[each_target].values)
label_dict[each_target]['values'] = start_train[each_target].values
label_dict[each_target]['classes'] = np.unique(start_train[each_target].values)
class_nums = np.unique(start_train[each_target].values)
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
label_dict[each_target]['transformer'] = []
label_dict[each_target]['dictionary'] = dict(zip(classes,classes))
print(' Target %s is already numeric. No transformation done.' %each_target)
if rare_class != 1:
print('Alert! Rare Class is not 1 but %s in this data set' %rare_class)
else:
#### In Regression problems, max_class_length is artificially set to one.
#### It turns out that Estimated Time is correlated to number of classes in data set. Hence we use this!
max_class_length = 1
###########################################################################################
#### This is where we start doing the iterative hyper tuning parameters #####
params_dict = defaultdict(list)
accu_mean = []
error_rate = []
###### This is where we do the training and hyper parameter tuning ########
orig_preds = [x for x in list(orig_train) if x not in target]
count = 0
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(orig_train[orig_preds], verbose)
##### Classify Columns ################
id_cols = var_df['id_vars']
nlp_columns = var_df['nlp_vars']
date_cols = var_df['date_vars']
del_cols = var_df['cols_delete']
factor_cols = var_df['factor_vars']
numvars = var_df['continuous_vars']+var_df['int_vars']
cat_vars = var_df['string_bool_vars']+var_df['discrete_string_vars']+var_df[
'cat_vars']+var_df['factor_vars']+var_df['num_bool_vars']
num_bool_vars = var_df['num_bool_vars']
#######################################################################################
preds = [x for x in orig_preds if x not in id_cols+del_cols+date_cols+target]
if len(id_cols+del_cols+date_cols)== 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(id_cols+del_cols+date_cols))
################## This is where real code begins ###################################################
GPU_exists = check_if_GPU_exists()
###### This is where we set the CPU and GPU parameters for XGBoost
param = {}
if Boosting_Flag:
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
model_name = 'CatBoost'
hyper_param = None
else:
model_name = 'XGBoost'
else:
model_name = 'XGBoost'
elif Boosting_Flag is None:
model_name = 'Linear'
else:
model_name = 'Forests'
##### Set the Scoring Parameters here based on each model and preferences of user ##############
cpu_params = {}
if model_name == 'XGBoost':
##### WE should keep CPU params as backup in case GPU fails!
cpu_params['nthread'] = -1
cpu_params['tree_method'] = 'hist'
cpu_params['grow_policy'] = 'depthwise'
cpu_params['max_depth'] = max_depth
cpu_params['max_leaves'] = 0
cpu_params['verbosity'] = 0
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
cpu_params['num_parallel_tree'] = 1
if GPU_exists:
param['nthread'] = -1
param['tree_method'] = 'gpu_hist'
param['grow_policy'] = 'depthwise'
param['max_depth'] = max_depth
param['max_leaves'] = 0
param['verbosity'] = 0
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
param['num_parallel_tree'] = 1
else:
param = copy.deepcopy(cpu_params)
validation_metric = copy.deepcopy(scoring_parameter)
elif model_name.lower() == 'catboost':
if model_class == 'Binary-Class':
catboost_scoring = 'Accuracy'
validation_metric = 'Accuracy'
loss_function='Logloss'
elif model_class == 'Multi-Class':
catboost_scoring = 'AUC'
validation_metric = 'AUC:type=Mu'
loss_function='MultiClass'
else:
loss_function = 'RMSE'
validation_metric = 'RMSE'
catboost_scoring = 'RMSE'
else:
validation_metric = copy.deepcopy(scoring_parameter)
########## D A T A P R E P R O C E S S I N G H E R E ##########################
print('############# D A T A P R E P A R A T I O N #############')
if start_train.isnull().sum().sum() > 0:
print('Filling missing values with "missing" placeholder and adding a column for missing_flags')
else:
print('No Missing Values in train data set')
copy_preds = copy.deepcopy(preds)
missing_flag_cols = []
if len(copy_preds) > 0:
dict_train = {}
for f in copy_preds:
if f in nlp_columns:
#### YOu have to skip this for NLP columns ##############
continue
missing_flag = False
if start_train[f].dtype == object:
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,True)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif start_train[f].dtype == np.int64 or start_train[f].dtype == np.int32 or start_train[f].dtype == np.int16:
### if there are integer variables, don't scale them. Leave them as is.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num).astype(int)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num).astype(int)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif f in factor_cols:
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,False)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
else:
### for all numeric variables, fill missing values with 1 less than min.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
###########################################################################################
if orig_train.isnull().sum().sum() > 0:
### If there are missing values in remaining features print it here ####
top5 = orig_train.isnull().sum().sort_values(ascending=False).index.tolist()[:5]
print(' Columns with most missing values: %s' %(
[x for x in top5 if orig_train[x].isnull().sum()>0]))
print(' and their missing value totals: %s' %([orig_train[x].isnull().sum() for x in
top5 if orig_train[x].isnull().sum()>0]))
if start_train[copy_preds].isnull().sum().sum() == 0:
print('Completed missing value Imputation. No more missing values in train.')
if verbose >= 1:
print(' %d new missing value columns added: %s' %(len(missing_flag_cols),missing_flag_cols))
else:
print('Error: Unable to complete missing value imputation in train. Exiting...')
return
####################################################################################
if type(orig_test) != str:
if start_test[copy_preds].isnull().sum().sum() > 0:
print('Test data still has some missing values. Fix it. Exiting...')
return
else:
print('Test data has no missing values. Continuing...')
###########################################################################################
else:
print(' Could not find any variables in your data set. Please check your dataset and try again')
return
###########################################################################################
print('Completed Label Encoding and Filling of Missing Values for Train and Test Data')
### This is a minor test to make sure that Boolean vars are Integers if they are Numeric!
if len(num_bool_vars) > 0:
### Just make sure that numeric Boolean vars are set as Integer type -> otherwise CatBoost will blow up
for each_bool_num in var_df['num_bool_vars']:
start_train[each_bool_num] = start_train[each_bool_num].astype(int)
if type(start_test) != str:
start_test[each_bool_num] = start_test[each_bool_num].astype(int)
######################################################################################
######### Set your Refit Criterion here - if you want to maximize Precision or Recall do it here ##
if modeltype == 'Regression':
if scoring_parameter in ['log_loss', 'neg_mean_squared_error','mean_squared_error']:
refit_metric = 'rmse'
else:
refit_metric = 'mae'
else:
if scoring_parameter in ['precision', 'precision_score','average_precision']:
refit_metric = 'precision'
elif scoring_parameter in ['logloss', 'log_loss']:
refit_metric = 'log_loss'
elif scoring_parameter in ['recall', 'recall_score']:
refit_metric = 'recall'
elif scoring_parameter in ['f1', 'f1_score','f1_weighted']:
refit_metric = 'f1'
elif scoring_parameter in ['accuracy', 'balanced_accuracy','balanced-accuracy']:
refit_metric = 'balanced_accuracy'
else:
refit_metric = 'balanced_accuracy'
print('%s problem: hyperparameters are being optimized for %s' %(modeltype,refit_metric))
###########################################################################################
### Make sure you remove variables that are highly correlated within data set first
rem_vars = left_subtract(preds,numvars)
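    #### left_subtract(a, b) (a helper defined elsewhere in this module) returns the items of a that are not in b, so rem_vars holds the non-numeric predictors here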
if len(numvars) > 0 and feature_reduction:
numvars = remove_variables_using_fast_correlation(start_train,numvars, 'pearson',
corr_limit,verbose)
### Reduced Preds are now free of correlated variables and hence can be used for Poly adds
red_preds = rem_vars + numvars
#### You need to save a copy of this red_preds so you can later on create a start_train
#### with it after each_target cycle is completed. Very important!
orig_red_preds = copy.deepcopy(red_preds)
for each_target in target:
print('\n############# PROCESSING T A R G E T = %s ##########################' %each_target)
######## D E F I N I N G N E W T R A I N and N E W T E S T here #########################
#### This is where we set the orig train data set with multiple labels to the new start_train
#### start_train has the new features added or reduced with the multi targets in one cycle
### That way, we start each train with one target, and then reset it with multi target
#############################################################################################
train = start_train[[each_target]+red_preds]
if type(orig_test) != str:
test = start_test[red_preds]
###### Add Polynomial Variables and Interaction Variables to Train ######
if Add_Poly >= 1:
if Add_Poly == 1:
print('\nAdding only Interaction Variables. This may result in Overfitting!')
elif Add_Poly == 2:
print('\nAdding only Squared Variables. This may result in Overfitting!')
elif Add_Poly == 3:
print('\nAdding Both Interaction and Squared Variables. This may result in Overfitting!')
## Since the data is already scaled, we set scaling to None here ##
### For train data we have to set the fit_flag to True ####
if len(numvars) > 1:
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
train_sel, lm, train_red,md,fin_xvars,feature_xvar_dict = add_poly_vars_select(train,numvars,
each_target,modeltype,poly_degree,Add_Poly,md='',
corr_limit=corr_limit, scaling='None',
fit_flag=True,verbose=verbose)
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
if len(left_subtract(train_sel,numvars)) > 0:
#### This means that new intxn and poly vars were added. In that case, you can use them as is
#### Since these vars were alread tested for correlation, there should be no high correlation!
### SO you can take train_sel as the new list of numeric vars (numvars) going forward!
addl_vars = left_subtract(train_sel,numvars)
#numvars = list(set(numvars).intersection(set(train_sel)))
##### Print the additional Interxn and Poly variables here #######
if verbose >= 1:
print(' Intxn and Poly Vars are: %s' %addl_vars)
train = train_red[train_sel].join(train[rem_vars+[each_target]])
red_preds = [x for x in list(train) if x not in [each_target]]
if type(test) != str:
######### Add Polynomial and Interaction variables to Test ################
## Since the data is already scaled, we set scaling to None here ##
### For Test data we have to set the fit_flag to False ####
_, _, test_x_df,_,_,_ = add_poly_vars_select(test,numvars,each_target,
modeltype,poly_degree,Add_Poly,md,
corr_limit, scaling='None', fit_flag=False,
verbose=verbose)
### we need to convert x_vars into text_vars in test_x_df using feature_xvar_dict
test_x_vars = test_x_df.columns.tolist()
test_text_vars = [feature_xvar_dict[x] for x in test_x_vars]
test_x_df.columns = test_text_vars
#### test_red contains reduced variables with orig and substituted poly/intxn variables
test_red = test_x_df[train_sel]
                        #### we should now combine test_red with rem_vars so that it is the same shape as train
test = test_red.join(test[rem_vars])
#### Now we should change train_sel to subst_vars since that is the new list of vars going forward
numvars = copy.deepcopy(train_sel)
else:
#### NO new variables were added. so we can skip the rest of the stuff now ###
#### This means the train_sel is the new set of numeric features selected by add_poly algorithm
red_preds = train_sel+rem_vars
print(' No new variable was added by polynomial features...')
else:
print('\nAdding Polynomial vars ignored since no numeric vars in data')
train_sel = copy.deepcopy(numvars)
else:
### if there are no Polynomial vars, then all numeric variables are selected
train_sel = copy.deepcopy(numvars)
################ A U T O N L P P R O C E S S I N G B E G I N S H E R E !!! ####
if len(nlp_columns) > 0:
for nlp_column in nlp_columns:
nlp_column_train = train[nlp_column].values
if not isinstance(orig_test, str):
nlp_column_test = test[nlp_column].values
train1, test1, best_nlp_transformer,max_features_limit = Auto_NLP(nlp_column,
train, test, each_target, refit_metric,
modeltype, top_nlp_features, verbose,
build_model=False)
########################################################################
if KMeans_Featurizer:
start_time1 = time.time()
##### Do a clustering of word vectors from each NLP_column. This gives great results!
tfidf_term_array = create_tfidf_terms(nlp_column_train, best_nlp_transformer,
is_train=True, max_features_limit=max_features_limit)
print ('Creating word clusters using term matrix of size: %d for Train data set...' %len(tfidf_term_array['terms']))
num_clusters = int(np.sqrt(len(tfidf_term_array['terms']))/2)
if num_clusters < 2:
num_clusters = 2
##### Always set verbose to 0 since we KMEANS running is too verbose!
km = KMeans(n_clusters=num_clusters, random_state=seed, verbose=0)
kme, cluster_labels = return_cluster_labels(km, tfidf_term_array, num_clusters,
is_train=True)
if isinstance(nlp_column, str):
cluster_col = nlp_column + '_word_cluster_label'
else:
cluster_col = str(nlp_column) + '_word_cluster_label'
train1[cluster_col] = cluster_labels
print ('Created one new column: %s using selected NLP technique...' %cluster_col)
if not isinstance(orig_test, str):
tfidf_term_array_test = create_tfidf_terms(nlp_column_test, best_nlp_transformer,
is_train=False, max_features_limit=max_features_limit)
_, cluster_labels_test = return_cluster_labels(kme, tfidf_term_array_test, num_clusters,
is_train=False)
test1[cluster_col] = cluster_labels_test
print ('Created word clusters using same sized term matrix for Test data set...')
print(' Time Taken for creating word cluster labels = %0.0f seconds' %(time.time()-start_time1) )
####### Make sure you include the above new columns created in the predictor variables!
red_preds = [x for x in list(train1) if x not in [each_target]]
train = train1[red_preds+[each_target]]
if not isinstance(orig_test, str):
test = test1[red_preds]
################ A U T O N L P P R O C E S S I N G E N D S H E R E !!! ####
###### We have to detect float variables again since we have created new variables using Auto_NLP!!
train_sel = np.array(red_preds)[(train[red_preds].dtypes==float).values].tolist()
######### A D D D A T E T I M E F E A T U R E S ####################
if len(date_cols) > 0:
#### Do this only if date time columns exist in your data set!
for date_col in date_cols:
print('Processing %s column for date time features....' %date_col)
date_df_train = create_time_series_features(orig_train, date_col)
if not isinstance(date_df_train, str):
date_col_adds = date_df_train.columns.tolist()
print(' Adding %d columns from date time column %s' %(len(date_col_adds),date_col))
train = train.join(date_df_train)
else:
date_col_adds = []
if not isinstance(orig_test, str):
date_df_test = create_time_series_features(orig_test, date_col)
if not isinstance(date_df_test, str):
test = test.join(date_df_test)
red_preds = [x for x in list(train) if x not in [each_target]]
train_sel = train_sel + date_col_adds
######### SELECT IMPORTANT FEATURES HERE #############################
if feature_reduction:
important_features,num_vars, imp_cats = find_top_features_xgb(train,red_preds,train_sel,
each_target,
modeltype,corr_limit,verbose)
else:
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
#####################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
### Training an XGBoost model to find important features
train = train[important_features+[each_target]]
######################################################################
if type(orig_test) != str:
test = test[important_features]
############## F E A T U R E E N G I N E E R I N G S T A R T S N O W ##############
###### From here on we do some Feature Engg using Target Variable with Data Leakage ############
### To avoid Model Leakage, we will now split the Data into Train and CV so that Held Out Data
## is Pure and is unadulterated by learning from its own Target. This is known as Data Leakage.
###################################################################################################
print('Starting Feature Engineering now...')
X = train[important_features]
y = train[each_target]
################ I M P O R T A N T ##################################################
### The reason we don't use train_test_split is because we want only a partial train entropy binned
### If we use the whole of Train for entropy binning then there will be data leakage and our
### cross validation test scores will not be so accurate. So don't change the next 5 lines here!
################ I M P O R T A N T ##################################################
if modeltype == 'Regression':
            skf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)   ### shuffle=True is required when a random_state is given in newer scikit-learn
else:
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_train_index, cv_index = next(skf.split(X, y))
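        #### Only the first fold of the splitter is used: this yields one train/CV split rather than full cross validation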
################ TRAIN CV TEST SPLIT HERE ##################################################
try:
#### Sometimes this works but other times, it gives an error!
X_train, X_cv = X.loc[cv_train_index], X.loc[cv_index]
y_train, y_cv = y.loc[cv_train_index], y.loc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.loc[cv_train_index]
part_cv = train.loc[cv_index]
except:
#### This works when the above method gives an error!
X_train, X_cv = X.iloc[cv_train_index], X.iloc[cv_index]
y_train, y_cv = y.iloc[cv_train_index], y.iloc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.iloc[cv_train_index]
part_cv = train.iloc[cv_index]
print('Train CV Split completed with', "TRAIN rows:", cv_train_index.shape[0], "CV rows:", cv_index.shape[0])
################ IMPORTANT ENTROPY BINNING FIRST TIME #####################################
############ Add Entropy Binning of Continuous Variables Here ##############################
num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
saved_important_features = copy.deepcopy(important_features) ### these are original features without '_bin' added
#### saved_num_vars is an important variable: it contains the orig_num_vars before they were binned
saved_num_vars = copy.deepcopy(num_vars) ### these are original numeric features without '_bin' added
############### BINNING FIRST TIME ##################################################
if Binning_Flag and len(saved_num_vars) > 0:
#### Do binning only when there are numeric features ####
#### When we Bin the first time, we set the entropy_binning flag to False so
#### no numeric variables are removed. But next time, we will remove them later!
part_train, num_vars, important_features, part_cv = add_entropy_binning(part_train,
each_target, saved_num_vars,
saved_important_features, part_cv,
modeltype, entropy_binning=False,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
### you get the name of the original vars which were binned here in this orig_num_vars variable!
orig_num_vars = left_subtract(saved_num_vars,num_vars)
#### you need to know the name of the binner variables. This is where you get it!
binned_num_vars = left_subtract(num_vars,saved_num_vars)
imp_cats += binned_num_vars
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
####################### KMEANS FIRST TIME ############################
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
if KMeans_Featurizer and len(saved_num_vars) > 0:
### DO KMeans Featurizer only if there are numeric features in the data set!
print(' Adding one Feature named "KMeans_Clusters" based on KMeans_Featurizer_Flag=True...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters as the same as log10 of number of rows in Train
num_clusters = int(np.round(max(2,np.log10(train.shape[0]))))
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features], num_clusters)
else:
### If it is Regression, you don't have to specify the number of clusters
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features])
#### Since this is returning the each_target in X_train, we need to drop it here ###
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
part_train[km_label] = train_clusters
part_cv[km_label] = cv_clusters
#X_train.drop(each_target,axis=1,inplace=True)
imp_cats.append(km_label)
for imp_cat in imp_cats:
part_train[imp_cat] = part_train[imp_cat].astype(int)
part_cv[imp_cat] = part_cv[imp_cat].astype(int)
####### The features are checked again once we add the cluster feature ####
important_features.append(km_label)
else:
print(' KMeans_Featurizer set to False or there are no numeric vars in data')
km_label = ''
####################### STACKING FIRST TIME ############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('Alert! Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_cv!
addcol, stacks1 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_train[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
addcol, stacks2 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_cv[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
part_train = part_train.join(pd.DataFrame(stacks1,index=cv_train_index,
columns=addcol))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
part_cv = part_cv.join(pd.DataFrame(stacks2,index=cv_index,
columns=addcol))
print(' Adding %d Stacking feature(s) to training data' %len(addcol))
###### We make sure that we remove any new features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(X_train,addcol,corr_limit,verbose)
important_features += addcol
###############################################################################
#### part train contains the unscaled original train. It also contains binned and orig_num_vars!
    #### DO NOT TOUCH part_train and part_cv -> we need them to recreate train later!
####################### Now do Feature Scaling Here #################################
part_train_scaled, part_cv_scaled = perform_scaling_numeric_vars(part_train, important_features,
part_cv, model_name, SS)
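    #### Scaling is done only after binning / KMeans / stacking so the engineered features are scaled too (SS presumably carries the chosen scaler object).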
#### part_train_scaled has both predictor and target variables. Target must be removed!
important_features = find_remove_duplicates(important_features)
X_train = part_train_scaled[important_features]
X_cv = part_cv_scaled[important_features]
#### Remember that the next 2 lines are crucial: if X and y are dataframes, then predict_proba
### will return dataframes or series. Otherwise it will return Numpy array's.
## Be consistent when using dataframes with XGB. That's the best way to keep feature names!
print('############### M O D E L B U I L D I N G B E G I N S ####################')
print('Rows in Train data set = %d' %X_train.shape[0])
print(' Features in Train data set = %d' %X_train.shape[1])
print(' Rows in held-out data set = %d' %X_cv.shape[0])
data_dim = X_train.shape[0]*X_train.shape[1]
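    #### data_dim (rows x columns) is only used further below to print a rough training-time estimate.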
### Setting up the Estimators for Single Label and Multi Label targets only
if modeltype == 'Regression':
metrics_list = ['neg_mean_absolute_error' ,'neg_mean_squared_error',
'neg_mean_squared_log_error','neg_median_absolute_error']
eval_metric = "rmse"
if scoring_parameter == 'neg_mean_absolute_error' or scoring_parameter =='mae':
meae_scorer = make_scorer(gini_meae, greater_is_better=False)
scorer = meae_scorer
elif scoring_parameter == 'neg_mean_squared_error' or scoring_parameter =='mse':
mse_scorer = make_scorer(gini_mse, greater_is_better=False)
scorer = mse_scorer
elif scoring_parameter == 'neg_mean_squared_log_error' or scoring_parameter == 'log_error':
msle_scorer = make_scorer(gini_msle, greater_is_better=False)
            print('    Log Error is not recommended since predicted values might be negative and cause an error. Using RMSE instead...')
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
elif scoring_parameter == 'neg_median_absolute_error' or scoring_parameter == 'median_error':
mae_scorer = make_scorer(gini_mae, greater_is_better=False)
scorer = mae_scorer
elif scoring_parameter =='rmse' or scoring_parameter == 'root_mean_squared_error':
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
else:
scoring_parameter = 'rmse'
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
#### HYPER PARAMETERS FOR TUNING ARE SETUP HERE ###
if hyper_param == 'GS':
r_params = {
"Forests": {
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': np.logspace(-5,3),
},
"XGBoost": {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
else:
import scipy as sp
r_params = {
"Forests": {
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': sp.stats.uniform(scale=1000),
},
"XGBoost": {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(2, 10),
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
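        #### These r_params feed GridSearchCV when hyper_param == 'GS' (discrete grids) and RandomizedSearchCV otherwise (scipy distributions) -- see the gs setup further below.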
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostRegressor(verbose=1,iterations=max_estims,random_state=99,
one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
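                #### bootstrap_type='Bernoulli' with subsample=0.7 means each tree sees roughly 70% of the rows; early_stopping_rounds stops training once the eval metric stops improving (effective when an eval set is passed to fit).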
else:
xgbm = XGBRegressor(seed=seed,n_jobs=-1,random_state=seed,subsample=subsample,
colsample_bytree=col_sub_sample,n_estimators=max_estims,
objective=objective)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#xgbm = Lasso(max_iter=max_iter,random_state=seed)
xgbm = Lasso(max_iter=max_iter,random_state=seed)
else:
xgbm = RandomForestRegressor(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,
'max_features': "sqrt"
})
else:
#### This is for Binary Classification ##############################
classes = label_dict[each_target]['classes']
metrics_list = ['accuracy_score','roc_auc_score','logloss', 'precision','recall','f1']
# Create regularization hyperparameter distribution with 50 C values ####
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#'max_features': [1,2,5, max_features],
#"criterion":['gini','entropy'],
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
}
c_params["CatBoost"] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'max_features': ['log', "sqrt"] ,
#'class_weight':[None,'balanced']
}
# Create regularization hyperparameter distribution using uniform distribution
if len(classes) == 2:
objective = 'binary:logistic'
if scoring_parameter == 'accuracy' or scoring_parameter == 'accuracy_score':
accuracy_scorer = make_scorer(gini_accuracy, greater_is_better=True, needs_proba=False)
scorer =accuracy_scorer
elif scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer =gini_scorer
elif scoring_parameter == 'auc' or scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_scorer = make_scorer(gini_roc, greater_is_better=True, needs_threshold=True)
scorer =roc_scorer
elif scoring_parameter == 'log_loss' or scoring_parameter == 'logloss':
scoring_parameter = 'neg_log_loss'
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'precision' or scoring_parameter == 'precision_score':
precision_scorer = make_scorer(gini_precision, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =precision_scorer
elif scoring_parameter == 'recall' or scoring_parameter == 'recall_score':
recall_scorer = make_scorer(gini_recall, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =recall_scorer
elif scoring_parameter == 'f1' or scoring_parameter == 'f1_score':
f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =f1_scorer
elif scoring_parameter == 'f2' or scoring_parameter == 'f2_score':
f2_scorer = make_scorer(f2_measure, greater_is_better=True, needs_proba=False)
scorer =f2_scorer
else:
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
#f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
# pos_label=rare_class)
#scorer = f1_scorer
### DO NOT USE NUM CLASS WITH BINARY CLASSIFICATION ######
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
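                #### Note: missing=-999 assumes missing values were imputed with -999 upstream; adjust this sentinel if a different fill value is used.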
xgbm.set_params(**param)
elif Boosting_Flag is None:
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance,
warm_start=warm_start, max_iter=max_iter)
else:
xgbm = RandomForestClassifier(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,'oob_score':True,
'max_features': "sqrt"
})
else:
##### This is for MULTI Classification ##########################
objective = 'multi:softmax'
eval_metric = "mlogloss"
if scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = gini_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_auc_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = roc_auc_scorer
elif scoring_parameter == 'average_precision' or scoring_parameter == 'mean_precision':
average_precision_scorer = make_scorer(gini_average_precision,
greater_is_better=True, needs_proba=True)
scorer = average_precision_scorer
elif scoring_parameter == 'samples_precision':
samples_precision_scorer = make_scorer(gini_samples_precision,
greater_is_better=True, needs_proba=True)
scorer = samples_precision_scorer
elif scoring_parameter == 'weighted_precision' or scoring_parameter == 'weighted-precision':
weighted_precision_scorer = make_scorer(gini_weighted_precision,
greater_is_better=True, needs_proba=True)
scorer = weighted_precision_scorer
elif scoring_parameter == 'macro_precision':
macro_precision_scorer = make_scorer(gini_macro_precision,
greater_is_better=True, needs_proba=True)
scorer = macro_precision_scorer
                elif scoring_parameter == 'micro_precision':
                    micro_precision_scorer = make_scorer(gini_micro_precision,
                                             greater_is_better=True, needs_proba=True)
                    scorer = micro_precision_scorer
elif scoring_parameter == 'samples_recall':
samples_recall_scorer = make_scorer(gini_samples_recall, greater_is_better=True, needs_proba=True)
scorer = samples_recall_scorer
elif scoring_parameter == 'weighted_recall' or scoring_parameter == 'weighted-recall':
weighted_recall_scorer = make_scorer(gini_weighted_recall,
greater_is_better=True, needs_proba=True)
scorer = weighted_recall_scorer
elif scoring_parameter == 'macro_recall':
macro_recall_scorer = make_scorer(gini_macro_recall,
greater_is_better=True, needs_proba=True)
scorer = macro_recall_scorer
elif scoring_parameter == 'micro_recall':
micro_recall_scorer = make_scorer(gini_micro_recall, greater_is_better=True, needs_proba=True)
scorer = micro_recall_scorer
elif scoring_parameter == 'samples_f1':
samples_f1_scorer = make_scorer(gini_samples_f1,
greater_is_better=True, needs_proba=True)
scorer = samples_f1_scorer
elif scoring_parameter == 'weighted_f1' or scoring_parameter == 'weighted-f1':
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
elif scoring_parameter == 'macro_f1':
macro_f1_scorer = make_scorer(gini_macro_f1,
greater_is_better=True, needs_proba=True)
scorer = macro_f1_scorer
elif scoring_parameter == 'micro_f1':
micro_f1_scorer = make_scorer(gini_micro_f1,
greater_is_better=True, needs_proba=True)
scorer = micro_f1_scorer
else:
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
import scipy as sp
if Boosting_Flag:
# Create regularization hyperparameter distribution using uniform distribution
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100, max_estims),
'max_depth': sp.stats.randint(1, 10)
}
c_params['CatBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
num_class= len(classes),
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
if hyper_param == 'GS':
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
}
else:
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
}
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance, multi_class='auto',
max_iter=max_iter, warm_start=False,
)
else:
if hyper_param == 'GS':
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion":['gini','entropy'],
}
else:
c_params["Forests"] = {
##### I have set these to avoid OverFitting which is a problem for small data sets ###
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'class_weight':[None,'balanced']
}
xgbm = RandomForestClassifier(bootstrap=bootstrap, oob_score=True,warm_start=warm_start,
n_estimators=100,max_depth=3,
min_samples_leaf=2,max_features='auto',
random_state=seed,n_jobs=-1)
###### Now do RandomizedSearchCV using # Early-stopping ################
if modeltype == 'Regression':
#scoreFunction = {"mse": "neg_mean_squared_error", "mae": "neg_mean_absolute_error"}
#### I have set the Verbose to be False here since it produces too much output ###
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=r_params[model_name],
scoring = scorer,
n_jobs=-1,
cv = scv,
refit = refit_metric,
return_train_score = True,
verbose=0)
elif hyper_param == 'RS':
gs = RandomizedSearchCV(xgbm,
param_distributions = r_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
cv = scv,
n_jobs=-1,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
else:
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=c_params[model_name],
scoring = scorer,
return_train_score = True,
n_jobs=-1,
refit = refit_metric,
cv = scv,
verbose=0)
elif hyper_param == 'RS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = RandomizedSearchCV(xgbm,
param_distributions = c_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
n_jobs=-1,
cv = scv,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
#trains and optimizes the model
eval_set = [(X_train,y_train),(X_cv,y_cv)]
print('Finding Best Model and Hyper Parameters for Target: %s...' %each_target)
##### Here is where we put the part_train and part_cv together ###########
if modeltype != 'Regression':
### Do this only for Binary Classes and Multi-Classes, both are okay
baseline_accu = 1-(train[each_target].value_counts(1).sort_values())[rare_class]
print(' Baseline Accuracy Needed for Model = %0.2f%%' %(baseline_accu*100))
print('CPU Count = %s in this device' %CPU_count)
if modeltype == 'Regression':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(80000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
else:
if hyper_param == 'GS':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(300000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(10000.*CPU_count)))
elif Boosting_Flag is None:
#### A Linear model is usually the fastest ###########
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(16000.*CPU_count)))
else:
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(100000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(25000.*CPU_count)))
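    #### Example (rough heuristic only): 100,000 rows x 50 features, 2 classes, 4 CPUs under RS + XGBoost gives 100000*50*2/(40000*4) ~ 62 mins.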
##### Since we are using Multiple Models each with its own quirks, we have to make sure it is done this way
##### ############ TRAINING MODEL FIRST TIME WITH X_TRAIN AND TESTING ON X_CV ############
model_start_time = time.time()
################################################################################################################################
##### BE VERY CAREFUL ABOUT MODIFYING THIS NEXT LINE JUST BECAUSE IT APPEARS TO BE A CODING MISTAKE. IT IS NOT!! #############
################################################################################################################################
#######
if Imbalanced_Flag:
if modeltype == 'Regression':
########### In case someone sets the Imbalanced_Flag mistakenly to True and it is Regression, you must set it to False ######
Imbalanced_Flag = False
else:
####### Imbalanced with Classification #################
try:
print('############## Imbalanced Flag on: Training model with SMOTE Oversampling method ###########')
#### The model is the downsampled model Trained on downsampled data sets. ####
model, X_train, y_train = training_with_SMOTE(X_train,y_train,eval_set, gs,
Boosting_Flag, eval_metric,
modeltype, model_name,training=True,
minority_class=rare_class,imp_cats=imp_cats,
calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params = cpu_params,
verbose=verbose)
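                    #### training_with_SMOTE presumably oversamples the rare class in the training fold only (never in X_cv) and returns the resampled X_train/y_train along with the fitted search object.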
if isinstance(model, str):
model = copy.deepcopy(gs)
                        #### If the SMOTE-trained model failed, it will just be an empty string, so we fall back to the regular model ###
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
try:
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train,
cat_features=imp_cats,eval_set=(X_cv,y_cv), use_best_model=True,plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats,use_best_model=False,plot=False)
else:
model.fit(X_train, y_train)
#### If downsampling succeeds, it will be used to get the best score and can become model again ##
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
else:
val_keys = list(model.best_score_.keys())
best_score = model.best_score_[val_keys[-1]][validation_metric]
except:
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
best_score = 0
################################################################################################################################
####### Though this next step looks like it is a Coding Mistake by Me, don't change it!!! ###################
####### This is for case when Imbalanced with Classification succeeds, this next step is skipped ############
################################################################################################################################
if not Imbalanced_Flag:
########### This is for both regular Regression and regular Classification Model Training. It is not a Mistake #############
########### In case Imbalanced training fails, this method is also tried. That's why we test the Flag here!! #############
try:
model = copy.deepcopy(gs)
if Boosting_Flag:
if model_name == 'XGBoost':
try:
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train, cat_features=imp_cats,
eval_set=(X_cv,y_cv), use_best_model=True, plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X_train, y_train)
except:
                print('Error training the regular model the first time: check if your input is correct...')
return
try:
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
validation_metric = copy.deepcopy(scoring_parameter)
else:
val_keys = list(model.best_score_.keys())
if 'validation' in val_keys:
validation_metric = list(model.best_score_['validation'].keys())[0]
best_score = model.best_score_['validation'][validation_metric]
else:
validation_metric = list(model.best_score_['learn'].keys())[0]
best_score = model.best_score_['learn'][validation_metric]
except:
print('Error: Not able to print validation metrics. Continuing...')
## TRAINING OF MODELS COMPLETED. NOW GET METRICS on CV DATA ################
print(' Actual training time (in seconds): %0.0f' %(time.time()-model_start_time))
print('########### S I N G L E M O D E L R E S U L T S #################')
if modeltype != 'Regression':
############## This is for Classification Only !! ########################
if scoring_parameter in ['logloss','neg_log_loss','log_loss','log-loss','']:
print('{}-fold Cross Validation {} = {}'.format(n_splits, 'logloss', best_score))
elif scoring_parameter in ['accuracy','balanced-accuracy','balanced_accuracy','roc_auc','roc-auc',
'f1','precision','recall','average-precision','average_precision',
'weighted_f1','weighted-f1','AUC']:
print('%d-fold Cross Validation %s = %0.1f%%' %(n_splits,scoring_parameter, best_score*100))
else:
print('%d-fold Cross Validation %s = %0.1f' %(n_splits,validation_metric, best_score))
else:
######### This is for Regression only ###############
if best_score < 0:
best_score = best_score*-1
if scoring_parameter == '':
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,'RMSE', best_score))
else:
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,validation_metric, best_score))
#### We now need to set the Best Parameters, Fit the Model on Full X_train and Predict on X_cv
### Find what the order of best params are and set the same as the original model ###
if hyper_param == 'RS' or hyper_param == 'GS':
best_params= model.best_params_
print(' Best Parameters for Model = %s' %model.best_params_)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
#### CatBoost does not need too many iterations. Just make sure you set the iterations low after the first time!
if model.get_best_iteration() == 0:
### In some small data sets, the number of iterations becomes zero, hence we set it as a default number
best_params = dict(zip(['iterations','learning_rate'],[1000,model.get_all_params()['learning_rate']]))
else:
best_params = dict(zip(['iterations','learning_rate'],[model.get_best_iteration(),model.get_all_params()['learning_rate']]))
print(' %s Best Parameters for Model: Iterations = %s, learning_rate = %0.2f' %(
model_name, model.get_best_iteration(), model.get_all_params()['learning_rate']))
if hyper_param == 'RS' or hyper_param == 'GS':
#### In the case of CatBoost, we don't do any Hyper Parameter tuning #########
gs = copy.deepcopy(model)
model = gs.best_estimator_
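        #### Keep only the refit best estimator for predictions; the full search object stays in gs so its cv_results_ can be plotted later (see plot_RS_params below).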
if modeltype == 'Multi_Classification':
try:
if X_cv.shape[0] <= 1000:
                        # This works well for small data sets and is similar to a parametric approach
method= 'sigmoid' # 'isotonic' # #
else:
                        # This works well for large data sets and is non-parametric
method= 'isotonic'
model = CalibratedClassifierCV(model, method=method, cv="prefit")
model.fit(X_train, y_train)
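                #### cv='prefit' assumes `model` is already fitted; refitting the calibrator on the same X_train can be slightly optimistic -- a separate hold-out set would be stricter.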
print('Using a Calibrated Classifier in this Multi_Classification dataset to improve results...')
calibrator_flag = True
except:
calibrator_flag = False
pass
        ### performed_ensembling starts out False below and is set to True once ensembling completes ##
if model_name.lower() == 'catboost':
print('Best Model selected and its parameters are:\n %s' %model.get_all_params())
else:
print('Best Model selected and its parameters are:\n %s' %model)
performed_ensembling = False
if modeltype != 'Regression':
m_thresh = 0.5
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
if len(classes) <= 2:
print('Finding Best Threshold for Highest F1 Score...')
precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,rare_class])
#precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,1])
try:
f1 = (2*precision*recall)/(precision+recall)
f1 = np.nan_to_num(f1)
m_idx = np.argmax(f1)
m_thresh = thresholds[m_idx]
best_f1 = f1[m_idx]
except:
best_f1 = f1_score(y_cv, y_pred)
m_thresh = 0.5
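            #### Example: when the rare class is only ~5% of rows, the F1-maximizing threshold is often well below 0.5, which is why the suggested threshold is marked on the histogram below.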
# retrieve just the probabilities for the positive class
pos_probs = y_proba[:, rare_class]
if verbose >= 1:
# create a histogram of the predicted probabilities for the Rare Class since it will help decide threshold
plt.figure(figsize=(6,6))
plt.hist(pos_probs, bins=Bins, color='g')
plt.title("Model's Predictive Probability Histogram for Rare Class=%s with suggested threshold in red" %rare_class_orig)
plt.axvline(x=m_thresh, color='r', linestyle='--')
plt.show();
print(" Using threshold=0.5. However, %0.3f provides better F1=%0.2f for rare class..." %(m_thresh,best_f1))
###y_pred = (y_proba[:,rare_class]>=m_thresh).astype(int)
predicted = copy.deepcopy(y_proba)
                    predicted[:,0] = (predicted[:,0] >= (1-m_thresh)).astype('int')
                    predicted[:,1] = (predicted[:,1] > m_thresh).astype('int')
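                    #### predicted[:,rare_class] is now a 0/1 label vector obtained by thresholding the rare-class probability at m_thresh; it replaces y_pred below when m_thresh differs from 0.5.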
if m_thresh != 0.5:
y_pred = predicted[:,rare_class]
else:
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
else:
y_pred = model.predict(X_cv)
### This is where you print out the First Model's Results ########
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
if isinstance(y_cv,pd.Series):
y_cv = y_cv.values
print('%s Model Prediction Results on Held Out CV Data Set:' %model_name)
if modeltype == 'Regression':
rmsle_calculated_m = rmse(y_cv, y_pred)
print_regression_model_stats(y_cv, y_pred,'%s Model: Predicted vs Actual for %s'%(model_name,each_target))
else:
if model_name == 'Forests':
if calibrator_flag:
print(' OOB Score = %0.3f' %model.base_estimator.oob_score_)
else:
print(' OOB Score = %0.3f' %model.oob_score_)
rmsle_calculated_m = balanced_accuracy_score(y_cv,y_pred)
if len(classes) == 2:
print(' Regular Accuracy Score = %0.1f%%' %(accuracy_score(y_cv,y_pred)*100))
y_probas = model.predict_proba(X_cv)
rmsle_calculated_m = print_classification_model_stats(y_cv, y_probas, m_thresh)
else:
###### Use a nice classification matrix printing module here #########
print(' Balanced Accuracy Score = %0.1f%%' %(rmsle_calculated_m*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv, y_pred))
###### SET BEST PARAMETERS HERE ######
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if modeltype == 'Regression':
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
try:
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d regressors' %len(cols))
ensem_pred = subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])
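                        #### With 5 columns the single best model (last column) gets weight 0.5 and each of the 4 QuickML regressors gets 0.125, so the weights sum to 1.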
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[cols].mean(axis=1))
print('#############################################################################')
performed_ensembling = True
#### Since we have a new ensembled y_pred, make sure it is series or array before printing it!
if isinstance(y_pred,pd.Series):
print_regression_model_stats(y_cv, ensem_pred.values,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
else:
print_regression_model_stats(y_cv, ensem_pred,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
## This is for Classification Problems Only #
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
#### We do Ensembling only if the Stacking_Flag is False. Otherwise, we don't!
try:
classes = label_dict[each_target]['classes']
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d classifiers' %len(cols))
ensem_pred = np.round(subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(cols))
ensem_pred = (subm[cols].mean(axis=1)).astype(int)
print('#############################################################################')
performed_ensembling = True
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
print('No Ensembling of models done since Stacking_Flag = True ')
if verbose >= 1:
if len(classes) == 2:
plot_classification_results(model,X_cv, y_cv, y_pred, classes, class_nums, each_target )
else:
try:
Draw_ROC_MC_ML(model, X_cv, y_cv, each_target, model_name, verbose)
Draw_MC_ML_PR_ROC_Curves(model,X_cv,y_cv)
except:
print('Could not plot PR and ROC curves. Continuing...')
#### In case there are special scoring_parameter requests, you can print it here!
if scoring_parameter == 'roc_auc' or scoring_parameter == 'auc':
if len(classes) == 2:
print(' ROC AUC Score = %0.1f%%' %(roc_auc_score(y_cv, y_proba[:,rare_class])*100))
else:
print(' No ROC AUC score for multi-class problems')
elif scoring_parameter == 'jaccard':
accu_all = jaccard_singlelabel(y_cv, y_pred)
print(' Mean Jaccard Similarity = {:,.1f}%'.format(
accu_all*100))
## This is for multi-label problems ##
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
elif scoring_parameter == 'basket_recall':
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
if not Stacking_Flag and performed_ensembling:
if modeltype == 'Regression':
rmsle_calculated_f = rmse(y_cv, y_pred)
print('After multiple models, Ensemble Model Results:')
print(' RMSE Score = %0.5f' %(rmsle_calculated_f,))
print('#############################################################################')
if rmsle_calculated_f < rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
else:
rmsle_calculated_f = balanced_accuracy_score(y_cv,y_pred)
print('After multiple models, Ensemble Model Results:')
rare_pct = y_cv[y_cv==rare_class].shape[0]/y_cv.shape[0]
print(' Balanced Accuracy Score = %0.3f%%' %(
rmsle_calculated_f*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv,y_pred))
print('#############################################################################')
if rmsle_calculated_f > rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
if verbose >= 1:
if Boosting_Flag:
try:
if model_name.lower() == 'catboost':
plot_xgb_metrics(model,catboost_scoring,eval_set,modeltype,'%s Results' %each_target,
model_name)
else:
plot_xgb_metrics(gs.best_estimator_,eval_metric,eval_set,modeltype,'%s Results' %each_target,
model_name)
except:
print('Could not plot Model Evaluation Results Metrics')
else:
try:
plot_RS_params(gs.cv_results_, scoring_parameter, each_target)
except:
print('Could not plot Cross Validation Parameters')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
    print('Training model on complete Train data and Predicting using given Test Data...')
################ I M P O R T A N T: C O M B I N I N G D A T A ######################
#### This is Second time: we combine train and CV into Train and Test Sets #################
    train = pd.concat([part_train, part_cv])  ### DataFrame.append was removed in newer pandas; concat is equivalent here
important_features = [x for x in list(train) if x not in [each_target]]
############################################################################################
###### Now that we have used partial data to make stacking predictors, we can remove them from consideration!
if Stacking_Flag:
important_features = left_subtract(important_features, addcol)
try:
train.drop(addcol,axis=1, inplace=True)
except:
pass
###### Similarly we will have to create KMeans_Clusters again using full Train data!
if KMeans_Featurizer:
important_features = left_subtract(important_features, km_label)
try:
train.drop(km_label,axis=1, inplace=True)
except:
pass
########################## BINNING SECOND TIME ###############################
new_num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
## Now we re-use the saved_num_vars which contained a list of num_vars for binning now!
###### Once again we do Entropy Binning on the Full Train Data Set !!
########################## BINNING SECOND TIME ###############################
if Binning_Flag and len(saved_num_vars) > 0:
### when you bin the second time, you have to send in important_features with original
### numeric variables so that it works on binning only those. Otherwise it will fail.
### Do Entropy Binning only if there are numeric variables in the data set! #####
        #### When we bin the second time, we set the entropy_binning flag to True so
#### that all numeric variables that are binned are removed. This way, only bins remain.
train, num_vars, important_features, test = add_entropy_binning(train, each_target,
orig_num_vars, important_features, test,
modeltype, entropy_binning=True,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
####################### KMEANS SECOND TIME ############################
if KMeans_Featurizer and len(saved_num_vars) > 0:
#### Perform KMeans Featurizer only if there are numeric variables in data set! #########
print('Adding one feature named "KMeans_Clusters" using KMeans_Featurizer...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters as the same as log10 of number of rows in Train
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features], num_clusters)
else:
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features])
#### Now make sure that the cat features are either string or integers ######
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
train[km_label] = train_cluster
if not isinstance(test, str):
test[km_label] = test_cluster
#X_train.drop(each_target,axis=1,inplace=True)
for imp_cat in imp_cats:
train[imp_cat] = train[imp_cat].astype(int)
if not isinstance(test, str):
test[imp_cat] = test[imp_cat].astype(int)
saved_num_vars.append(km_label) ### You need to add it to this variable list for Scaling later!
important_features.append(km_label)
########################## STACKING SECOND TIME ###############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('CAUTION: Stacking can produce Highly Overfit models on Training Data...')
        ### This second time, the stacking features are learned from the full Train data
        ### and the same stacking models are applied to Test further below.
addcol, stacks1 = QuickML_Stacking(train[important_features],train[each_target],'',
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
        #### addcols keeps the stacking column names returned by QuickML_Stacking
        addcols = copy.deepcopy(addcol)
train = train.join(pd.DataFrame(stacks1,index=train.index,
columns=addcols))
##### Leaving multiple columns for Stacking is best! Do not do the average of predictions!
print(' Adding %d Stacking feature(s) to training data' %len(addcols))
if not isinstance(orig_test, str):
            ### The stacking models learned from the full Train data are now applied to Test to create the same stacking features.
_, stacks2 = QuickML_Stacking(train[important_features],train[each_target],test[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
test = test.join(pd.DataFrame(stacks2,index=test.index,
columns=addcols))
#test = test.join(pd.DataFrame(stacks2.mean(axis=1).round().astype(int),
# columns=[addcol],index=test.index))
###### We make sure that we remove too many features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(train,addcol,corr_limit,verbose)
important_features += addcols
        saved_num_vars += addcols ### addcols is a list of new column names, so extend (not append) to keep this list flat
############################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(saved_important_features)
#important_features = copy.deepcopy(red_preds)
############################################################################################
if model_name.lower() == 'catboost':
        print('    Setting best params for the CatBoost model from its initial state, since you cannot change the params of an already-fitted CatBoost model')
model = xgbm.set_params(**best_params)
print(' Number of Categorical and Integer variables used in CatBoost training = %d' %len(imp_cats))
#### Perform Scaling of Train data a second time using FULL TRAIN data set this time !
#### important_features keeps track of all variables that we need to ensure they are scaled!
train, test = perform_scaling_numeric_vars(train, important_features, test,
model_name, SS)
################ T R A I N I N G M O D E L A S E C O N D T I M E ###################
### The next 2 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
trainm = train[important_features+[each_target]]
red_preds = copy.deepcopy(important_features)
X = trainm[red_preds]
y = trainm[each_target]
eval_set = [()]
##### ############ TRAINING MODEL SECOND TIME WITH FULL_TRAIN AND PREDICTING ON TEST ############
model_start_time = time.time()
if modeltype != 'Regression':
if Imbalanced_Flag:
try:
print('################## Imbalanced Flag Set ############################')
print('Imbalanced Class Training using SMOTE Rare Class Oversampling method...')
model, X, y = training_with_SMOTE(X,y, eval_set, model,
Boosting_Flag, eval_metric,modeltype, model_name,
training=False, minority_class=rare_class,
imp_cats=imp_cats, calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params=cpu_params,
verbose=verbose)
if isinstance(model, str):
#### If downsampling model failed, it will just be an empty string, so you can try regular model ###
model = copy.deepcopy(best_model)
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
#### Set the Verbose to 0 since we don't want too much output ##
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
### Since second time we don't have X_cv, we remove it
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
                    print('Error training the regular model the second time: check if the input is correct...')
return
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X, y)
except:
            print('Error training the model the second time: check if the input is correct...')
return
print('Actual Training time taken in seconds = %0.0f' %(time.time()-model_start_time))
## TRAINING OF MODELS COMPLETED. NOW START PREDICTIONS ON TEST DATA ################
#### new_cols is to keep track of new prediction columns we are creating #####
new_cols = []
if not isinstance(orig_test, str):
### If there is a test data frame, then let us predict on it #######
### The next 3 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
try:
#### We need the id columns to carry over into the predictions ####
testm = orig_test[id_cols].join(test[red_preds])
except:
### if for some reason id columns are not available, then do without it
testm = test[red_preds]
X_test = testm[red_preds]
else:
##### If there is no Test file, then do a final prediction on Train itself ###
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
testm = orig_train[id_cols].join(trainm[red_preds])
X_test = testm[red_preds]
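        #### With no test file, predictions below are made on the original Train rows (reindexed to their original order) so the output format stays identical.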
if modeltype == 'Regression':
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
######## This is for Regression Problems Only ###########
        ###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
try:
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d regressors' %len(new_cols))
ensem_pred = subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])
else:
                    print('    Calculating regular average ensemble of %d regressors' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1))
##### This next step is very important since some models give series, others give arrays. Very painful!
                if isinstance(ensem_pred, pd.Series):
ensem_pred = ensem_pred.values
new_col = each_target+'_Ensembled_predictions'
testm[new_col] = ensem_pred
new_cols.append(new_col)
print('Completed Ensemble predictions on held out data')
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,
scoring_parameter,verbose=verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
if len(stack_cols) == 1:
testm[new_col] = stacksfinal
else:
#### Just average the predictions from each stacked model into a final pred
testm[new_col] = stacksfinal.mean(axis=1)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
#### If there is a test file, it probably doesn't have target, so add predictions to it!
testm[each_target+'_predictions'] = y_pred
else:
proba_cols = []
######## This is for both Binary and Multi Classification Problems ###########
y_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)
predicted = copy.deepcopy(y_proba)
if len(classes) <= 2:
            predicted[:,0] = (predicted[:,0] >= (1-m_thresh)).astype('int')
            predicted[:,1] = (predicted[:,1] > m_thresh).astype('int')
if predicted[:,rare_class].mean()==0 or predicted[:,rare_class].mean()==1:
### If the model is predicting all 0's or all 1's, you need to use a regular threshold
m_thresh = 0.5
print(' Making test Data predictions using regular Threshold = %0.3f' %m_thresh)
else:
### If the model is good with the modified threshold, then you use the modified threshold!
print(' Making test Data predictions using modified Threshold = %0.3f' %m_thresh)
y_pred = predicted[:,rare_class]
else:
##### For multi-class, just make predictions of multiple classes here #######
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values.astype(int)
else:
            ### In a small number of cases it's a 2-D array with a single column.
            ### That causes errors later, so we flatten it to a 1-D array here.
try:
if y_pred.shape[1] == 1:
y_pred = y_pred.ravel()
except:
y_pred = y_pred.astype(int)
if len(label_dict[each_target]['transformer']) == 0:
######### NO T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is no transformer, then leave the predicted classes as is
classes = label_dict[each_target]['classes']
##### If there is no transformer, you can just predict the classes as is and save it here ###
testm[each_target+'_predictions'] = y_pred
###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
                #### Ensembling is done only when Stacking_Flag is False ####
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### You will need to create probabilities for each class here ####
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = int(label_dict[each_target]['dictionary'][each_class])
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
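                #### Above, one probability column per class was added, named <target>_proba_<class>, using the class-to-column index stored in label_dict.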
if not Stacking_Flag:
new_col = each_target+'_Ensembled_predictions'
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
testm[new_col] = ensem_pred
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
else:
######### T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is a transformer, then you must convert the predicted classes to orig classes
classes = label_dict[each_target]['classes']
dic = label_dict[each_target]['dictionary']
transformer = label_dict[each_target]['transformer']
class_nums = label_dict[each_target]['class_nums']
##### If there is a transformer, you must convert predictions to original classes
testm[each_target+'_predictions'] = pd.Series(y_pred).map(transformer).values
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = label_dict[each_target]['dictionary'][each_class]
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
                    ###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = pd.Series(y_pred).map(transformer).values
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = pd.Series(ensembles[:,each]).map(transformer).values
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
                    if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
print('Completed Ensemble predictions on held out data')
new_col = each_target+'_Ensembled_predictions'
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
print('########################################################')
print('Completed Stacked predictions on held out data')
testm[new_col] = pd.Series(ensem_pred).map(transformer).values
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = pd.Series(y_pred).map(transformer).values
##################### P L O T F E A T U R E I M P O R T A N C E S H E R E ###################
if calibrator_flag:
plot_model = model.base_estimator
else:
plot_model = copy.deepcopy(model)
try:
if Boosting_Flag is None:
### If you don't use absolute values, you won't get the right set of features in order. Make sure!
imp_features_df = pd.DataFrame(abs(plot_model.coef_[0]),
columns=['Feature Importances'],index=important_features).sort_values(
'Feature Importances',ascending=False)
else:
if model_name.lower() == 'xgboost':
##### SHAP requires this step: XGBoost models must have been "predicted"
_ = plot_model.predict(X_test)
### It is possible that in some cases, XGBoost has fewer features than what was sent in.
### In those cases, we need to identify and know which features in XGBoost are in and which are out
#### In that case, we need to find those features and then do a feature importance
dictf = plot_model.get_booster().get_score(importance_type='gain')
if len(left_subtract(plot_model.get_booster().feature_names,important_features)) > 0:
#### If feature names from XGBoost and important_features are not same,you must transform dictf like this!
dicta = dict(zip(plot_model.get_booster().feature_names,important_features))
featdict = dict([(x,dicta[x]) for x in dictf.keys()])
featdict2 = dict([(dicta[x],dictf[x]) for x in featdict.keys()])
imp_features_df = pd.DataFrame(featdict2.values(),index=featdict2.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
else:
#### If the feature names from XGBoost and the important_features are same,
### you can plot dictf immediately!
imp_features_df = pd.DataFrame(dictf.values(),index=dictf.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
elif model_name == 'Forests':
imp_features_df = pd.DataFrame(plot_model.feature_importances_, columns=['Feature Importances'],
index=important_features).sort_values('Feature Importances',
ascending=False)
elif model_name.lower() == 'catboost':
from catboost import Pool
imp_features_df = pd.DataFrame(plot_model.get_feature_importance(
Pool(X_cv, label=y_cv,cat_features=imp_cats)),
columns=['Feature Importances'],
index=important_features).sort_values(
'Feature Importances',ascending=False)
### Now draw the feature importances using the data frame above!
height_size = 5
width_size = 10
color_string = 'byrcmgkbyrcmgkbyrcmgkbyrcmgk'
print('Plotting Feature Importances to explain the output of model')
imp_features_df[:15].plot(kind='barh',title='Feature Importances for predicting %s' %each_target,
figsize=(width_size, height_size), color=color_string);
except:
print('Could not draw feature importance plot due to an error')
########### D R A W SHAP VALUES USING TREE BASED MODELS. THE REST WILL NOT GET SHAP ############
if verbose >= 2:
print('Trying to plot SHAP values if SHAP is installed in this machine...')
try:
if model_name.lower() == 'catboost':
if verbose > 0:
import shap
from catboost import Pool
shap.initjs()
plt.figure()
shap_values = plot_model.get_feature_importance(Pool(X_cv, label=y_cv,cat_features=imp_cats),type="ShapValues")
shap_df = pd.DataFrame(np.c_[X_cv.values,y_cv],columns=[list(X_cv)+[each_target]])
if modeltype == 'Multi_Classification':
for each_i in range(len(classes)):
### This is needed for Catboost models but it is very cumbersome!
### You need to cycle through multiple values of classes from 0 to n_classes-1.
### There is no way to force it in an Ax => so you are stuck printing multiple charts
shap.summary_plot(shap_values[:,each_i,:], shap_df, plot_type="violin")
else:
shap.summary_plot(shap_values, shap_df, plot_type="violin")
else:
import shap
shap.initjs()
#### This works well for RFC and XGBoost for multiclass problems #####
#### This plots a violin plot that is different from the bar chart above!
#### This does not work for CatBoost so try something else!
if model_name.lower() == 'linear':
explainer = shap.LinearExplainer(plot_model, X_test, feature_dependence="independent")
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
elif model_name.lower() == 'forests':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
### There is no single violin plot for Random Forests in SHAP
#### It actually has multiple outputs so you can loop through it for each class
if modeltype != 'Regression':
for each_i in range(len(classes)):
plt.figure()
shap.summary_plot(shap_values[each_i], X_test)
elif model_name.lower() == 'xgboost':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
except:
print('Could not plot SHAP values since SHAP is not installed or could not import SHAP in this machine')
print('############### P R E D I C T I O N O N T E S T C O M P L E T E D #################')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
## Write the test and submission files to disk ###
print('Writing Output files to disk...')
#############################################################################################
if not isinstance(testm, str):
try:
write_file_to_folder(testm, each_target, each_target+'_'+modeltype+'_'+'test_modified.csv')
##### D R A W K D E P L O T S FOR PROBABILITY OF PREDICTIONS - very useful! #########
if modeltype != 'Regression':
if verbose >= 2:
testm[proba_cols].plot(kind='kde',figsize=(10,6),
title='Predictive Probability Density Chart with suggested threshold in red')
plt.axvline(x=m_thresh, color='r', linestyle='--');
except:
print(' Error: Not able to save test modified file. Skipping...')
#############################################################################################
if isinstance(sample_submission, str):
sample_submission = testm[id_cols+[each_target+'_predictions']]
try:
write_file_to_folder(sample_submission, each_target, each_target+'_'+modeltype+'_'+'submission.csv')
except:
print(' Error: Not able to save submission file. Skipping...')
#############################################################################################
try:
#### Bring trainm back to its original index ###################
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
write_file_to_folder(trainm, each_target, each_target+'_'+modeltype+'_'+'train_modified.csv')
except:
print(' Error: Not able to save train modified file. Skipping...')
### In case of multi-label models, we will reset the start train and test dataframes to contain new features created
start_train = start_train[target].join(start_train[orig_red_preds])
if not isinstance(orig_test, str):
start_test = start_test[orig_red_preds]
#### Once each target cycle is over, reset the red_preds to the orig_red_preds so we can start over
red_preds = copy.deepcopy(orig_red_preds)
#### Perform Final Multi-Label Operations here since all Labels are finished by now ###
#### Don't change the target here to each_target since this is for multi-label situations only ###
if (scoring_parameter == 'basket_recall' or scoring_parameter == 'jaccard') and modeltype != 'Regression':
y_preds = np.array(list(zipped))
_,_,_,y_actuals = train_test_split(train[red_preds], train[target].values,
test_size=test_size, random_state=seed)
print('Shape of Actuals: %s and Preds: %s' %(y_actuals.shape[0], y_preds.shape[0]))
if y_actuals.shape[0] == y_preds.shape[0]:
if scoring_parameter == 'basket_recall' and len(target) > 1:
accu_all = basket_recall(y_actuals, y_preds).mean()
print(' Mean Basket Recall = {:,.1f}%'.format(
accu_all*100))
elif scoring_parameter == 'jaccard' and len(target) > 1:
## This shows similarity in multi-label situations ####
accu_all = jaccard_multilabel(y_actuals, y_preds)
print(' Mean Jaccard Similarity = %s' %(
accu_all))
## END OF ONE LABEL IN A MULTI LABEL DATA SET ! WHEW ! ###################
print('############### C O M P L E T E D ################')
print('Time Taken in mins = %0.1f for the Entire Process' %((time.time()-start_time)/60))
#return model, imp_features_df.index.tolist(), trainm, testm
return model, important_features, trainm, testm
###############################################################################
def plot_SHAP_values(m,X,modeltype,Boosting_Flag=False,matplotlib_flag=False,verbose=0):
import shap
# load JS visualization code to notebook
if not matplotlib_flag:
shap.initjs();
# explain the model's predictions using SHAP values
explainer = shap.TreeExplainer(m)
shap_values = explainer.shap_values(X)
if not Boosting_Flag is None:
if Boosting_Flag:
            # summary plots of the SHAP values (bar and, where applicable, violin) rather than a single-prediction force plot
if verbose > 0 and modeltype != 'Multi_Classification':
shap.summary_plot(shap_values, X, plot_type="violin");
if verbose >= 1:
shap.summary_plot(shap_values, X, plot_type="bar");
else:
shap.summary_plot(shap_values, X, plot_type="bar");
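# Hedged usage sketch (hypothetical names; assumes a fitted tree-based model `fitted_model` and its training frame `X_train`):
#     plot_SHAP_values(fitted_model, X_train, 'Binary_Classification', Boosting_Flag=True, verbose=1)
# Boosting_Flag=None plots nothing, False plots only the bar summary, and True adds the violin
# summary for non-multiclass targets when verbose > 0.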
################################################################################
################ Find top features using XGB ###################
################################################################################
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, mutual_info_regression, mutual_info_classif
def find_top_features_xgb(train,preds,numvars,target,modeltype,corr_limit,verbose=0):
"""
    This is a fast utility that uses XGB to find top features.
It returns a list of important features.
Since it is XGB, you dont have to restrict the input to just numeric vars.
You can send in all kinds of vars and it will take care of transforming it. Sweet!
"""
import xgboost as xgb
###################### I M P O R T A N T ##############################################
###### This top_num decides how many top_n features XGB selects in each iteration.
    ####  There are a total of 5 iterations. Hence 5x10 means a maximum of 50 features will be selected.
    #####  If there are more than 50 variables, then a maximum of 5*25 = 125 variables will be selected.
if len(preds) <= 50:
top_num = 10
else:
top_num = 25
###################### I M P O R T A N T ##############################################
#### If there are more than 30 categorical variables in a data set, it is worth reducing features.
    ####  Otherwise, XGBoost is pretty good at finding the best features whether cat or numeric!
n_splits = 5
max_depth = 8
max_cats = 5
###################### I M P O R T A N T ##############################################
train = copy.deepcopy(train)
preds = copy.deepcopy(preds)
numvars = copy.deepcopy(numvars)
subsample = 0.7
col_sub_sample = 0.7
train = copy.deepcopy(train)
start_time = time.time()
test_size = 0.2
seed = 1
early_stopping = 5
####### All the default parameters are set up now #########
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=33)
rem_vars = left_subtract(preds,numvars)
catvars = copy.deepcopy(rem_vars)
############ I M P O R T A N T ! I M P O R T A N T ! ######################
##### Removing the Cat Vars selection using Linear Methods since they fail so often.
##### Linear methods such as Chi2 or Mutual Information Score are not great
#### for feature selection since they can't handle large data and provide
#### misleading results for large data sets. Hence I am using XGBoost alone.
#### Also, another method of using Spearman Correlation for CatVars with 100's
    ####  of variables is very slow. Also, it is not clear how effective it is: only 3-4 vars
    ####   are removed. Hence, for now, I am not going to use the Spearman method. Perhaps later.
##############################################################################
#if len(catvars) > max_cats:
# start_time = time.time()
# important_cats = remove_variables_using_fast_correlation(train,catvars,'spearman',
# corr_limit,verbose)
# if verbose >= 1:
# print('Time taken for reducing highly correlated Categorical vars was %0.0f seconds' %(time.time()-start_time))
#else:
important_cats = copy.deepcopy(catvars)
print('No categorical feature reduction done. All %d Categorical vars selected ' %(len(catvars)))
if len(numvars) > 1:
final_list = remove_variables_using_fast_correlation(train,numvars,'pearson',
corr_limit,verbose)
else:
final_list = copy.deepcopy(numvars)
print(' Adding %s categorical variables to reduced numeric variables of %d' %(
len(important_cats),len(final_list)))
if isinstance(final_list,np.ndarray):
final_list = final_list.tolist()
preds = final_list+important_cats
#######You must convert category variables into integers ###############
for important_cat in important_cats:
if str(train[important_cat].dtype) == 'category':
train[important_cat] = train[important_cat].astype(int)
######## Drop Missing value rows since XGB for some reason #########
######## can't handle missing values in early stopping rounds #######
train.dropna(axis=0,subset=preds+[target],inplace=True)
######## Dont move this train and y definition anywhere else ########
y = train[target]
print('############## F E A T U R E S E L E C T I O N ####################')
important_features = []
if modeltype == 'Regression':
objective = 'reg:squarederror'
model_xgb = XGBRegressor( n_estimators=100,subsample=subsample,objective=objective,
colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5,
seed=1,n_jobs=-1,random_state=1)
eval_metric = 'rmse'
else:
#### This is for Classifiers only
classes = np.unique(train[target].values)
if len(classes) == 2:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='binary:logistic',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'logloss'
else:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='multi:softmax',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'mlogloss'
#### This is where you start to Iterate on Finding Important Features ################
save_xgb = copy.deepcopy(model_xgb)
train_p = train[preds]
if train_p.shape[1] < 10:
iter_limit = 2
else:
iter_limit = int(train_p.shape[1]/5+0.5)
print('Current number of predictors = %d ' %(train_p.shape[1],))
print(' Finding Important Features using Boosted Trees algorithm...')
try:
for i in range(0,train_p.shape[1],iter_limit):
new_xgb = copy.deepcopy(save_xgb)
print(' using %d variables...' %(train_p.shape[1]-i))
if train_p.shape[1]-i < iter_limit:
X = train_p.iloc[:,i:]
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
try:
eval_set = [(X_train,y_train),(X_cv,y_cv)]
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
print('XGB has a bug in version xgboost 1.02 for feature importances. Try to install version 0.90 or 1.10 - continuing...')
important_features += pd.Series(new_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
else:
X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]
#### Split here into train and test #####
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
eval_set = [(X_train,y_train),(X_cv,y_cv)]
try:
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
                    important_features += pd.Series(new_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
except:
print('Finding top features using XGB is crashing. Continuing with all predictors...')
important_features = copy.deepcopy(preds)
return important_features, [], []
important_features = list(OrderedDict.fromkeys(important_features))
print('Found %d important features' %len(important_features))
#print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))
numvars = [x for x in numvars if x in important_features]
important_cats = [x for x in important_cats if x in important_features]
return important_features, numvars, important_cats
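# Hedged usage sketch (hypothetical names; assumes a training frame, a predictor list, a numeric-column list and a target column):
#     top_feats, top_nums, top_cats = find_top_features_xgb(train_df, predictor_cols, numeric_cols,
#                                                           'target', 'Classification', corr_limit=0.70, verbose=1)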
################################################################################
def basket_recall(label, pred):
"""
This tests the recall of a given basket of items in a label by the second basket, pred.
It compares the 2 baskets (arrays or lists) named as label and pred, and finds common items
between the two. Then it divides that length by the total number of items in the label basket
to come up with a basket recall score. This score may be useful in recommendation problems
where you are interested in finding how many items in a basket (labels) that your
predictions (pred) basket got correct. The order of the items in the baskets does not matter.
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
if len(label) > 1:
jacc_arr = []
for row1,row2,count in zip(label,pred, range(len(label))):
intersection = len(np.intersect1d(row1,row2))
union = len(row1)
jacc = float(intersection / union)
if count == 0:
jacc_arr = copy.deepcopy(jacc)
else:
jacc_arr = np.r_[jacc_arr,jacc]
return jacc_arr
else:
        intersection = len(list(set(label).intersection(set(pred))))
        union = (len(label) + len(pred)) - intersection
        jacc_arr = float(intersection / union)
return jacc_arr
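# Worked example (illustrative): a label row [1, 2, 3, 4] versus a pred row [2, 4, 9, 9]
# shares {2, 4}, so that row's basket recall is 2 / len(label row) = 0.5.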
################################################################################
def jaccard_singlelabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
    ### This tells you how many labels in each row you got right. That's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
try:
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
jacc_each_label = np.sum(label==pred,axis=0)/label.shape[0]
return jacc_each_label
except:
return 0
################################################################################
def jaccard_multilabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
    ### This tells you how many labels in each row you got right. That's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
try:
jacc_data_set = np.sum(label==pred,axis=1).sum()/label.shape[1]
return jacc_data_set
except:
return 0
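# Minimal sanity-check sketch for the two jaccard helpers (hypothetical arrays, guarded so it does not run on import):
if __name__ == '__main__':
    _lbl = np.array([[1, 0], [1, 1]])
    _prd = np.array([[1, 1], [1, 1]])
    # column-wise accuracy per label -> array([1. , 0.5])
    print(jaccard_singlelabel(_lbl, _prd))
    # element-wise matches divided by the number of labels -> 1.5 (divide by the row count for a %)
    print(jaccard_multilabel(_lbl, _prd))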
################################################################################
def plot_RS_params(cv_results, score, mname):
"""
####### This plots the GridSearchCV Results sent in ############
"""
df = pd.DataFrame(cv_results)
params = [x for x in list(df) if x.startswith('param_')]
traincols = ['mean_train_score' ]
testcols = ['mean_test_score' ]
cols = traincols+testcols
ncols = 2
noplots = len(params)
if noplots%ncols == 0:
        rows = noplots//ncols
    else:
        rows = (noplots//ncols)+1
height_size = 5
width_size = 15
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.suptitle('Training and Validation: Hyper Parameter Tuning for target=%s' %mname, fontsize=20,y=1.01)
#### If the values are negative, convert them to positive ############
if len(df.loc[df[cols[0]]<0]) > 0:
df[cols] = df[cols]*-1
for each_param, count in zip(params, range(noplots)):
plt.subplot(rows,ncols,count+1)
ax1 = plt.gca()
if df[each_param].dtype != object:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname),ax=ax1)
else:
try:
df[each_param] = pd.to_numeric(df[each_param])
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname), ax=ax1)
except:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='bar',stacked=False,
title='%s for %s' %(each_param,mname), ax=ax1)
#### This is to plot the test_mean_score against params to see how it increases
for each_param in params:
#### This is to find which parameters are non string and convert them to strings
if df[each_param].dtype!=object:
df[each_param] = df[each_param].astype(str)
try:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x), axis=1 )
except:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x.map(str)), axis=1 )
if len(params) == 1:
df['combined_parameters'] = copy.deepcopy(df[params])
else:
df[['combined_parameters']+cols].groupby('combined_parameters').mean().sort_values(
cols[1]).plot(figsize=(width_size,height_size),kind='line',subplots=False,
title='Combined Parameters: %s scores for %s' %(score,mname))
plt.xticks(rotation=45)
plt.show();
return df
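# Hedged usage sketch (assumes a fitted RandomizedSearchCV/GridSearchCV `search` run with return_train_score=True,
# since the plot reads both mean_train_score and mean_test_score):
#     cv_df = plot_RS_params(search.cv_results_, 'accuracy', 'my_target')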
################################################################################
def plot_xgb_metrics(model,eval_metric,eval_set,modeltype,model_label='',model_name=""):
height_size = 5
width_size = 10
if model_name.lower() == 'catboost':
results = model.get_evals_result()
else:
results = model.evals_result()
res_keys = list(results.keys())
eval_metric = list(results[res_keys[0]].keys())
    if isinstance(eval_metric, list):
        # if multiple metrics were tracked, plot only the first one
        eval_metric = eval_metric[0]
# plot metrics now
fig, ax = plt.subplots(figsize=(width_size, height_size))
epochs = len(results[res_keys[0]][eval_metric])
x_axis = range(0, epochs)
if model_name.lower() == 'catboost':
ax.plot(x_axis, results[res_keys[0]][eval_metric], label='%s' %res_keys[0])
else:
ax.plot(x_axis, results[res_keys[0]][eval_metric], label='%s' %res_keys[0])
epochs = len(results[res_keys[-1]][eval_metric])
x_axis = range(0, epochs)
ax.plot(x_axis, results[res_keys[-1]][eval_metric], label='%s' %res_keys[-1])
ax.legend()
plt.ylabel(eval_metric)
plt.title('%s Train and Validation Metrics across Epochs (Early Stopping in effect)' %model_label)
plt.show();
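# Hedged usage sketch (assumes an XGBoost or CatBoost model fitted with an eval_set so its evals_result() is populated):
#     plot_xgb_metrics(model_xgb, 'logloss', eval_set, 'Binary_Classification',
#                      model_label='XGBoost', model_name='XGBoost')
# Note: the eval_metric and eval_set arguments are kept for the signature; the metric actually plotted is
# re-read from the model's own evals_result().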
################################################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
################################################################################
def classify_columns(df_preds, verbose=0):
"""
Takes a dataframe containing only predictors to be classified into various types.
DO NOT SEND IN A TARGET COLUMN since it will try to include that into various columns.
Returns a data frame containing columns and the class it belongs to such as numeric,
categorical, date or id column, boolean, nlp, discrete_string and cols to delete...
####### Returns a dictionary with 10 kinds of vars like the following: # continuous_vars,int_vars
# cat_vars,factor_vars, bool_vars,discrete_string_vars,nlp_vars,date_vars,id_vars,cols_delete
"""
max_cols_to_print = 30
print('############## C L A S S I F Y I N G V A R I A B L E S ####################')
print('Classifying variables in data set...')
    #### Cat_Limit defines the max number of categories a column can have to be called a categorical column
cat_limit = 15
def add(a,b):
return a+b
train = df_preds[:]
sum_all_cols = dict()
orig_cols_total = train.shape[1]
#Types of columns
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
if len(var_df.loc[discrete_or_nlp==1]) != 0:
for col in discrete_or_nlp_vars:
#### first fill empty or missing vals since it will blowup ###
train[col] = train[col].fillna(' ')
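            #### Heuristic used below: an average string length of 50+ characters marks an NLP/text column;
            #### otherwise more than cat_limit unique values marks a discrete string (or an id column when every
            #### row is unique); anything else is treated as categorical.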
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= 50 and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
#### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...
#### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
                            pd.to_datetime(train[col],infer_datetime_format=True)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #carbon content in biomass
c_cont_po_plasma = 0.5454
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S1nu = df1nu['Firewood_other_energy_use'].values
c_firewood_energy_S1pl = df1pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp(t,remainAGB):
return (1-(1-np.exp(-a*t))**b)*remainAGB
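# Quick check on the decay curve above: at t = 0 the bracket equals 1, so decomp(0, remainAGB) = remainAGB,
# and as t grows (1 - np.exp(-a*t))**b approaches 1, so the remaining carbon pool decays toward zero.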
#set zero matrix
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix[:,i] = np.diff(output_decomp[:,i])
i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
import pandas as pd
import numpy as np
file1 = '../data/STRIDE_PATIENT.xlsx'
x1 = pd.ExcelFile(file1)
stride_patient = x1.parse('Sheet1')
file2 = '../data//SURGERY.xlsx'
x2 = pd.ExcelFile(file2)
surgery = x2.parse('Sheet1')
stride_patient_req = stride_patient
pat_surgery = pd.merge(stride_patient_req, surgery, on='PAT_DEID', how='inner')
pat_surgery['BIRTH_DATE'] = pat_surgery['BIRTH_DATE'].str[0:7] + '19' + pat_surgery['BIRTH_DATE'].str[7:]
pat_surgery['SURGERY_DATE'] = pat_surgery['SURGERY_DATE'].str[0:7] + '20' + pat_surgery['SURGERY_DATE'].str[7:]
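# The slicing above prepends the century ('19' for birth dates, '20' for surgery dates) in front of the
# two-digit year (assumed to start at position 7 of the raw date string) so pd.to_datetime below parses a four-digit year.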
pat_surgery['BIRTH_DATE'] = pd.to_datetime(pat_surgery['BIRTH_DATE'])
pat_surgery['SURGERY_DATE'] = pd.to_datetime(pat_surgery['SURGERY_DATE'])
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
        assert not lib.is_timedelta_or_timedelta64_array(arr)
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
import sys
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
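# Illustrative call (added for clarity; the file names are assumptions, not part of
# the original script). This writes the results CSV into ../archive/xgboost/<timestamp>/
# and copies the generating script next to it:
#   archive_results('submission.csv', results, 'xgboost', os.path.basename(__file__))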
###############################################################################################
# support class to redirect stderr
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self,name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# Stderr
oldstderr = sys.stderr # global
def capture_stderr(log):
oldstderr = sys.stderr
sys.stderr = open(log, 'w')
sys.stderr = flushfile(sys.stderr)
return log
def restore_stderr():
sys.stderr = oldstderr
def parse_xgblog(xgblog):
import re
pattern = re.compile(r'^\[(?P<round>\d+)\]\s*\D+:(?P<validation>\d+.\d+)\s*\D+:(?P<train>\d+.\d+)')
xgb_list = []
with open(xgblog, "r") as ins:
next(ins)
for line in ins:
match = pattern.match(line)
if match:
idx = int(match.group("round"))
validation = float(match.group("validation"))
training = float(match.group("train"))
xgb_list.append([idx, validation, training])
else:
pass # raise Exception("Failed to parse!")
return xgb_list
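# Illustrative helper (added for clarity, never called): shows how capture_stderr,
# restore_stderr and parse_xgblog above are intended to fit together. The log file
# name and the column labels are assumptions, not part of the original script.
def _example_xgblog_usage(logfile='xgb_train.log'):
    xgblog = capture_stderr(logfile)
    # ... xgb.train(params, dtrain, evals=watchlist) would run here, writing its
    # per-round evaluation output to stderr (and therefore into the log file) ...
    restore_stderr()
    return pd.DataFrame(parse_xgblog(xgblog),
                        columns=['round', 'validation', 'train'])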
def preprocess_data(train,test):
id_test=test['patient_id']
train=train.drop(['patient_id'],axis=1)
test=test.drop(['patient_id'],axis=1)
y=train['is_screener']
train=train.drop(['is_screener'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
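# Illustrative (this call does not appear in the surviving part of the script):
# once the merges below are finished, the helper above would typically be used as
#   id_test, test, train, y = preprocess_data(train, test)
# before handing train/y to xgboost.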
os.chdir(os.getcwd())
train_file = '../input/patients_train.csv.gz'
test_file = '../input/patients_test.csv.gz'
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
train.drop( 'patient_gender', axis = 1, inplace = True )
test.drop( 'patient_gender', axis = 1, inplace = True )
########## last asctivity files
activity_file=('../input/activity_selected_last.csv.gz')
diagnosis_file=('../input/diagnosis_selected_last.csv.gz')
procedure_file=('../input/procedure_selected_last.csv.gz')
surgical_file=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
physicians_file=('../input/physicians.csv.gz')
drugs_file=('../input/drugs.csv.gz')
############ first activity files
activity_file_first=('../input/activity_selected_last.csv.gz')
diagnosis_file_first=('../input/diagnosis_selected_last.csv.gz')
procedure_file_first=('../input/procedure_selected_last.csv.gz')
surgical_file_first=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
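# NOTE: the *_first variables above still point at the *_selected_last files
# (apparently a copy/paste leftover); the "first activity" merge that would use
# them is commented out below.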
activity=pd.read_csv(activity_file )
#Fa=pd.read_csv(activity_file_first,usecols=['activity_year'])
#print(Fa)
#activity['activity_first_year']=Fa['activity_year']
#activity['delta_time_activity']=activity['activity_year']-activity['activity_first_year']
#print(activity[activity['delta_time_activity']!=0,'delta_time_activity'])
train=pd.merge(train,activity, on='patient_id',how='left')
test=pd.merge(test,activity, on='patient_id',how='left')
print('after merging activity')
print(train.shape,test.shape)
procedure=pd.read_csv(procedure_file )
diagnosis=pd.read_csv(diagnosis_file)
diagnosis=pd.merge(diagnosis,procedure,on=['patient_id','claim_id'],how='left')
train=pd.merge(train,diagnosis, on='patient_id',how='left')
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
                    right_f(pd.Timestamp("20010109"), df)
import pytest
from dppd import dppd
import pandas as pd
import numpy as np
import pandas.testing
from plotnine.data import mtcars, diamonds
from collections import OrderedDict
assert_series_equal = pandas.testing.assert_series_equal
def assert_frame_equal(left, right, check_column_order=True, **kwargs):
if not check_column_order:
assert set(left.columns) == set(right.columns)
left = left.loc[:, right.columns]
return pandas.testing.assert_frame_equal(left, right, **kwargs)
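# Example (illustrative): with check_column_order=False only the set of columns has
# to match; `left` is reordered to `right`'s columns before comparing, so e.g.
#   assert_frame_equal(df[["b", "a"]], df[["a", "b"]], check_column_order=False)
# passes for any DataFrame df with columns "a" and "b".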
dp, X = dppd()
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
def ordered_DataFrame(d, index=None):
"""Prior to pandas 0.23 (and python 3.6) the order
of columns in a DataFrame only followed the definition order for OrderedDicts.
"""
od = OrderedDict(d)
return pd.DataFrame(od, index=index)
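# Example (illustrative): ordered_DataFrame({"a": [1, 2], "b": [3, 4]}) is guaranteed
# to have the column order "a", "b" even on the older pandas/python versions
# described in the docstring.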
def test_head():
df = pd.DataFrame({"a": list(range(10))})
actual = dp(df).head(5).pd
should = df.head(5)
assert_frame_equal(should, actual)
def test_ends():
df = pd.DataFrame({"a": list(range(10))})
actual = dp(df).ends(2).pd
should = df.head(2)
should = should.append(df.tail(2))
assert_frame_equal(should, actual)
def test_2_stage_concat():
df = pd.DataFrame({"a": list(range(10))})
a = dp(df).head(5).pd
actual = dp(df).concat(a).pd
should = pd.concat([df, a], axis=0)
assert_frame_equal(should, actual)
def test_list_concat():
df = pd.DataFrame({"a": list(range(10))})
should = pd.concat([df, df, df], axis=0)
actual = dp(df).concat([df, df]).pd
assert_frame_equal(should, actual)
def test_arrange():
df = pd.DataFrame(
{
"a": [str(x) for x in (range(10))],
"bb": list(range(10)),
"ccc": list(range(10)),
}
).set_index("a")
should = df.sort_values("bb", ascending=False)
actual = dp(df).arrange("bb").pd
assert_frame_equal(should, actual)
def test_arrange_column_spec():
df = pd.DataFrame(
{
"a": [str(x) for x in (range(10))],
"bb": list(range(10)),
"ccc": list(range(10)),
}
).set_index("a")
should = df.sort_values("bb", ascending=False)
actual = dp(df).arrange([x for x in X.columns if len(x) == 2]).pd
assert_frame_equal(should, actual)
def test_arrange_column_spec_empty():
with pytest.raises(ValueError):
dp(mtcars).arrange(X.columns.str.startswith("nosuchcolumn"))
def test_arrange_grouped_column_spec_empty():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").arrange(lambda x: "nosuchcolumn" in x)
def test_arrange_column_spec_inverse():
actual = dp(mtcars).select("hp").arrange("-hp").pd
should = mtcars.sort_values("hp", ascending=True)[["hp"]]
assert_frame_equal(should, actual)
def test_arrange_kind_allowed():
with pytest.raises(ValueError):
dp(mtcars).select(["hp", "qsec"]).arrange("hp", "qsec")
def test_arrange_column_spec_inverse2():
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["hp", "qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[False, False])[
["hp", "qsec"]
]
assert_frame_equal(should, actual)
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["-hp", "qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[True, False])[["hp", "qsec"]]
assert_frame_equal(should, actual)
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["hp", "-qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[False, True])[["hp", "qsec"]]
assert_frame_equal(should, actual)
def test_mutate():
df = pd.DataFrame(
{"a": [str(x) for x in (range(10))], "bb": 10, "ccc": list(range(20, 30))}
).set_index("a")
should = df.assign(d=list(range(30, 40)))
actual = dp(df).mutate(d=X["ccc"] + X["bb"]).pd
assert_frame_equal(should, actual)
def test_transmutate():
df = pd.DataFrame(
{"a": [str(x) for x in (range(10))], "bb": 10, "ccc": list(range(20, 30))}
).set_index("a")
should = pd.DataFrame({"d": list(range(30, 40))}).set_index(df.index)
actual = dp(df).transmutate(d=X["ccc"] + X["bb"]).pd
assert_frame_equal(should, actual)
def test_distinct_dataFrame():
df = pd.DataFrame({"a": list(range(5)) + list(range(5)), "b": list(range(10))})
should = df.head(5)
actual = dp(df).distinct("a").pd
assert_frame_equal(should, actual)
def test_distinct_dataFrame_all_columns():
df = pd.DataFrame({"a": list(range(5)) + list(range(5)), "b": list(range(10))})
should = df
actual = dp(df).distinct().pd
assert_frame_equal(should, actual)
df = pd.DataFrame({"a": list(range(5)) + list(range(5))})
should = df.head(5)
actual = dp(df).distinct().pd
assert_frame_equal(should, actual)
def test_distinct_series():
a = pd.Series(["a", "a", "b", "c", "d", "b"])
should = a.iloc[[0, 2, 3, 4]]
actual = dp(a).distinct().pd
assert_series_equal(should, actual)
def test_filter():
actual = dp(mtcars).filter_by(X.name.str.contains("Merc")).pd
should = mtcars[mtcars.name.str.contains("Merc")]
assert_frame_equal(should, actual)
def test_filter_combo():
actual = dp(mtcars).filter_by(X.name.str.contains("Merc") & (X.hp > 62)).pd
should = mtcars[mtcars.name.str.contains("Merc") & (mtcars.hp > 62)]
assert_frame_equal(should, actual)
def test_add_count():
df = pd.DataFrame({"x": [1, 5, 2, 2, 4, 0, 4], "y": [1, 2, 3, 4, 5, 6, 5]})
actual = dp(df).add_count().pd
should = pd.DataFrame(
OrderedDict(
[
("x", [1, 5, 2, 2, 4, 0, 4]),
("y", [1, 2, 3, 4, 5, 6, 5]),
("count", len(df)),
]
)
)
# should.index = [5, 0, 2, 3, 4, 6, 1]
assert_frame_equal(should, actual)
def test_groupby_add_count():
df = pd.DataFrame({"x": [1, 5, 2, 2, 4, 0, 4], "y": [1, 2, 3, 4, 5, 6, 5]})
actual = dp(df).groupby("x").add_count().ungroup().pd
should = ordered_DataFrame(
{
"x": [1, 5, 2, 2, 4, 0, 4],
"y": [1, 2, 3, 4, 5, 6, 5],
"count": [1, 1, 2, 2, 2, 1, 2],
}
)
# should.index = [5, 0, 2, 3, 4, 6, 1]
assert_frame_equal(should, actual)
def test_groupby_head():
actual = dp(mtcars).groupby("cyl").head(1).select("name").pd
should = (
pd.DataFrame(
{
"name": ["Datsun 710", "Mazda RX4", "Hornet Sportabout"],
"cyl": [4, 6, 8],
"idx": [2, 0, 4],
}
)
.set_index("idx")
.sort_index()[["name"]]
)
should.index.name = None
assert_frame_equal(should, actual)
def test_groupby_ungroup_head():
actual = dp(mtcars).groupby("cyl").identity().ungroup().head(1).pd
should = mtcars.iloc[[0]]
should = should[["cyl"] + [x for x in should.columns if x != "cyl"]]
assert_frame_equal(should, actual)
def test_ungroup_on_non_grouped_raises():
with pytest.raises(AttributeError):
dp(mtcars).ungroup()
def test_groupby_summarise():
actual = dp(mtcars).groupby("cyl").summarise(("name", len, "count")).pd
should = (
pd.DataFrame({"cyl": [4, 6, 8], "count": [11, 7, 14]})
.set_index("cyl")
.reset_index()
)
assert_frame_equal(should, actual)
def test_sorting_within_groups():
actual = dp(mtcars).groupby(X.cyl).arrange("qsec").ungroup().pd
should = mtcars.sort_values(["cyl", "qsec"])
should = should[actual.columns]
assert_frame_equal(should, actual)
def test_sorting_within_groups_head():
actual = dp(mtcars).groupby(X.cyl).print().sort_values("qsec").tail(1).pd
dfs = []
for cyl, sub_df in mtcars.groupby("cyl"):
sub_df = sub_df.sort_values("qsec")
dfs.append(sub_df.tail(1))
should = pd.concat(dfs)[actual.columns]
assert_frame_equal(should, actual)
def test_sorting_within_groups_head_ungroup():
actual = dp(mtcars).groupby(X.cyl).arrange("qsec").ungroup().tail(1).pd
for cyl, sub_df in mtcars.groupby("cyl"):
sub_df = sub_df.sort_values("qsec")
should = sub_df.tail(1)[actual.columns]
assert_frame_equal(should, actual)
def test_select_in_grouping_keeps_groups():
actual = dp(mtcars).groupby("cyl").select("qsec").ungroup().pd
assert (actual.columns == ["cyl", "qsec"]).all()
def test_iter_groups():
g = []
ls = []
for grp, sub_df in dp(mtcars).groupby("cyl").itergroups():
g.append(grp)
ls.append(len(sub_df))
assert g == [4, 6, 8]
assert ls == [11, 7, 14]
def test_grouped_mutate_returns_scalar_per_group():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": [should[cyl] for cyl in mtcars.cyl]},
index=mtcars.index,
)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_scalar_per_group_str():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: "X" + str(len(sub_df)) for (grp, sub_df) in X.itergroups()})
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": ["X" + str(should[cyl]) for cyl in mtcars.cyl]},
index=mtcars.index,
)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_series_per_group():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "grp_rank"]]
assert_frame_equal(should, actual)
def test_grouped_mutate_callable():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(max_hp=lambda x: x["hp"].max())
.select(["cyl", "max_hp", "name"])
.ungroup()
.pd
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = pd.Series(sub_df["hp"].max(), index=sub_df.index)
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(max_hp=ac)[["cyl", "max_hp", "name"]].sort_values("name")
assert_frame_equal(should, actual.sort_values("name"))
def test_grouped_mutate_callable2():
actual = (
dp(mtcars)
.groupby(["cyl", "qsec"])
.mutate(max_hp=lambda x: x["hp"].max())
.select(["cyl", "max_hp", "name"])
.ungroup()
.pd
)
ac = []
for grp, sub_df in mtcars.groupby(["cyl", "qsec"]):
x = pd.Series(sub_df["hp"].max(), index=sub_df.index)
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(max_hp=ac)[["cyl", "qsec", "max_hp", "name"]].sort_values(
"name"
)
assert_frame_equal(should, actual.sort_values("name"))
def test_grouped_mutate_returns_scalar():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count=4)
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame({"cyl": mtcars.cyl, "count": 4}, index=mtcars.index)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_series():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count=pd.Series(range(len(mtcars))))
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": pd.Series(range(len(mtcars)))}, index=mtcars.index
)
assert_frame_equal(should, actual)
def test_grouped_mutate_in_non_group():
actual = (
dp(mtcars)
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.select("count")
.pd.sort_index()
)
should = ordered_DataFrame(
{"count": [len(mtcars)] * len(mtcars)}, index=mtcars.index
)
assert_frame_equal(should, actual)
def test_grouped_mutate_in_non_group_invalid_key():
with pytest.raises(KeyError):
dp(mtcars).mutate(
count={"shu": len(sub_df) for (grp, sub_df) in X.itergroups()}
)
def test_grouped_mutate_in_non_group_multile_keys():
with pytest.raises(KeyError):
dp(mtcars).mutate(count={None: 5, "shu": "hello"})
def test_grouped_mutate_repeated_keys():
df = mtcars.copy()
df.index = list(range(16)) + list(range(16))
with pytest.raises(ValueError): # cannot reindex from duplicate axis
with dppd(df) as (ddf, X):
ddf.groupby("cyl").mutate(
grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()}
)
def test_grouped_mutate_non_sorted():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "grp_rank"]]
assert_frame_equal(should, actual)
def test_groupby_two_summarize_grouped():
actual = (
dp(diamonds).groupby(["color", "cut"]).summarise(("price", len, "count")).pd
)
should = pd.DataFrame(diamonds.groupby(["color", "cut"])["price"].agg("count"))
should.columns = ["count"]
should = should.reset_index()
assert_frame_equal(should, actual)
def test_groupby_two_mutate_grouped():
actual = (
dp(mtcars)
.groupby(["cyl", "vs"])
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby(["cyl", "vs"]):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "vs", "grp_rank"]]
assert_frame_equal(should, actual)
def test_select_does_not_remove_group_columns():
actual = dp(mtcars).groupby("cyl").select("name").ungroup().pd
assert (actual.columns == ["cyl", "name"]).all()
def test_unselected_group_columns_is_ignored():
actual = dp(mtcars).groupby("cyl").unselect("cyl").ungroup().pd
assert "cyl" in actual.columns
def test_dropping_non_group_columns_works():
actual = dp(mtcars).groupby("cyl").drop("name", axis=1).ungroup().pd
assert "name" not in actual.columns
def test_dropping_group_columns_is_ignored():
actual = dp(mtcars).groupby("cyl").drop("cyl", axis=1).ungroup().pd
assert "cyl" in actual.columns
def test_groupby_sort_changes_order_but_not_result():
a = (
dp(mtcars)
.groupby("cyl")
.sort_values("hp")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
b = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
assert_frame_equal(a, b.loc[a.index]) #
def test_groupby_sort_changes_order_but_not_result2():
a = (
dp(mtcars)
.groupby("cyl")
.sort_values("hp")
.mutate(count={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
b = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
assert_frame_equal(a, b.loc[a.index]) #
def test_grouped_mutate_missing_keys():
actual = (
dp(mtcars).groupby("cyl").mutate(count={4: 170, 6: 180, 8: 190}).ungroup().pd
)
assert (actual[actual.cyl == 4]["count"] == 170).all()
assert (actual[actual.cyl == 6]["count"] == 180).all()
with pytest.raises(KeyError):
dp(mtcars).groupby("cyl").mutate(count={4: 170, 6: 180}).pd
def test_grouped_2_mutate_missing_keys():
counts = {(4, 0): 40, (4, 1): 41, (6, 0): 60, (6, 1): 61, (8, 0): 80, (8, 1): 81}
actual = dp(mtcars).groupby(["cyl", "vs"]).mutate(count=counts).ungroup().pd
print(actual)
assert (actual[(actual.cyl == 4) & (actual.vs == 0)]["count"] == 40).all()
assert (actual[(actual.cyl == 4) & (actual.vs == 1)]["count"] == 41).all()
assert (actual[(actual.cyl == 6) & (actual.vs == 0)]["count"] == 60).all()
assert (actual[(actual.cyl == 6) & (actual.vs == 1)]["count"] == 61).all()
assert (actual[(actual.cyl == 8) & (actual.vs == 0)]["count"] == 80).all()
assert (actual[(actual.cyl == 8) & (actual.vs == 1)]["count"] == 81).all()
with pytest.raises(KeyError):
del counts[4, 0]
dp(mtcars).groupby(["cyl", "vs"]).mutate(count=counts).pd
def test_basic_summary():
actual = dp(mtcars).groupby("cyl").summarize((X.hp, len, "count")).pd
should = mtcars.groupby("cyl")[["hp"]].agg("count")
should.columns = ["count"]
should = should.reset_index()
assert_frame_equal(should, actual) # will fail
def test_summary_quantiles():
args = [
("disp", lambda x, q=q: x.quantile(q), "q%.2f" % q)
for q in np.arange(0, 1.1, 0.1)
]
actual = dp(mtcars).sort_values("cyl").groupby("cyl").summarise(*args).pd
lambdas = [lambda x, q=q: x.quantile(q) for q in np.arange(0, 1.1, 0.1)]
for l, q in zip(lambdas, np.arange(0, 1.1, 0.1)):
l.__name__ = "q%.2f" % q
should = (
mtcars.sort_values("cyl")
.groupby("cyl")["disp"]
.aggregate(lambdas)
.reset_index()
)
assert_frame_equal(should, actual)
def test_summary_repeated_target_names():
with pytest.raises(ValueError):
dp(mtcars).summarise((X.disp, np.mean, "one"), (X.hp, np.mean, "one")).pd
def test_empty_summarize_raises():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").summarize()
with pytest.raises(ValueError):
dp(mtcars).summarize()
def test_summarise_non_tuple():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").summarize(np.min)
def test_summarize_auto_name():
actual = dp(mtcars).groupby("cyl").summarize(("hp", np.min))
assert "hp_amin" in actual.columns
def test_do():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = dp(mtcars).groupby("cyl").select("hp").do(count_and_count_unique).pd
should = pd.DataFrame(
OrderedDict(
[("cyl", [4, 6, 8]), ("count", [11, 7, 14]), ("unique", [10, 4, 9])]
)
)
assert_frame_equal(should, actual)
def test_do_categorical_grouping():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = (
dp(mtcars)
.mutate(cyl=pd.Categorical(X.cyl))
.groupby("cyl")
.select("hp")
.do(count_and_count_unique)
.pd
)
should = pd.DataFrame(
OrderedDict(
[
                ("cyl", pd.Categorical([4, 6, 8])),
#################################################################
# #
# Useful python scripts for interfacing #
# with datasets and programs #
# #
#################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from pypdb import describe_pdb
import os, sys
import tqdm
import Bio
def ProTherm_data():
data_path = "data/ProTherm+HotMusic.csv"
dataset = pd.read_csv(data_path)
return dataset
def Master_results():
data_path = "results/master.csv"
dataset = pd.read_csv(data_path)
return dataset
class HotMusic_data(object):
def __init__(self, data_path="HotMusic_dataset.csv"):
self.data_path = data_path
self.load_dataset()
def load_dataset(self):
dataset = pd.read_csv(self.data_path)
variations = list(dataset["Variation"])
for i, variation in enumerate(variations):
dataset.loc[i,"wt"] = variation[0]
dataset.loc[i,"mut"] = variation[-1]
dataset.loc[i,"location"] = variation[1:-1]
self.dataset = dataset
class Missense3D_data(object):
def __init__(self, data_path="", tsv_path=""):
self.data_path = data_path
self.tsv_path = tsv_path
try:
self.load_dataset()
except:
self.read_tsv()
def load_dataset(self):
self.dataset = pd.read_csv(self.data_path, header=0, index_col=False)
def save_dataset(self):
self.dataset.to_csv(self.data_path, index=False)
print("Dataset saved as: " + self.data_path)
def read_tsv(self):
print("No precomputed database detected, searching for TSV files")
dataset = pd.read_csv(self.tsv_path, sep='\t', header=0, index_col=False)
#Remove errors from dataset
count = 0
for i in range(dataset.shape[0]):
if dataset.loc[i, "#UniProt ID"][0] == "#":
dataset.drop(i, inplace=True)
count = count + 1
dataset.reset_index(inplace=True)
print("--- Removed {} errors from Missense3d results ---".format(count))
#Extract discription headers
descriptions = dataset.loc[1, "#Description"]
descriptions = descriptions.split("|")
headers = []
for i in range(16):
headers.append(descriptions[i].split(":")[1])
#Save prediction and description information
dataset.astype("object")
for i in range(dataset.shape[0]):
descriptions = dataset.loc[i, "#Description"]
descriptions = descriptions.split("|")
dataset.loc[i, "one_hot_features"] = ""
if dataset.loc[i,"#Prediction"].split()[1] == "Damaging":
dataset.loc[i, "BoolPrediction"] = 1
else:
dataset.loc[i, "BoolPrediction"] = 0
for j in range(16):
description = descriptions[j].split(":")[2]
if description[0] == "Y":
dataset.loc[i, "one_hot_features"] = dataset.loc[i, "one_hot_features"]+ "1"
else:
dataset.loc[i, "one_hot_features"]= dataset.loc[i, "one_hot_features"]+ "0"
# CONSTRUCT VARIANT INFO FOR DATA MERGING
dataset.loc[i, "#Orig"] = dataset.loc[i, "#Orig"][0] + dataset.loc[i, "#Orig"][1:].lower()
dataset.loc[i, "#Mutant"] = dataset.loc[i, "#Mutant"][0] + dataset.loc[i, "#Mutant"][1:].lower()
dataset.loc[i, "variant_info"] = dataset.loc[i, "#PDB ID"] + "_" + Bio.Data.IUPACData.protein_letters_3to1[dataset.loc[i, "#Orig"]] + str(dataset.loc[i, "#PosInPDB"]) + Bio.Data.IUPACData.protein_letters_3to1[dataset.loc[i, "#Mutant"]]
print(dataset.loc[i])
self.dataset = dataset
print("Constructed database")
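# Illustrative usage of the class above (the paths are assumptions, not from this script):
#   m3d = Missense3D_data(data_path="missense3d_parsed.csv", tsv_path="missense3d_raw.tsv")
#   m3d.save_dataset()                           # cache the parsed form for the next run
#   predictions = m3d.dataset["BoolPrediction"]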
def Missense3D_training_data():
data1 = pd.read_excel("/project/home/student1/FYP/data/raw/missense3d/all_dataset.xlsx", header=0, indexes=True)
    data2 = pd.read_excel("/project/home/student1/FYP/data/raw/missense3d/control_dataset.xlsx", header=0, indexes=True)
from abc import ABC
from datetime import timedelta, datetime
from itertools import product, islice
from os.path import getmtime, dirname, basename, splitext
from time import ctime
import pandas as pd
from corpus.corpus_entry import CorpusEntry
from util.string_util import contains_numeric
class Corpus(ABC):
"""
Base class for corpora
"""
def __init__(self, corpus_id, name, df_path=None, df=None):
"""
Create a new corpus holding a list of corpus entries
:param corpus_id: unique ID
:param name: unique corpus name
:param df_path: absolute path to DataFrame holding the segmentation information (samples)
:param df: DataFrame holding the segmentation information (samples)
"""
self.corpus_id = corpus_id
self._name = name
self.creation_date = None
self.root_path = None
if df_path:
self.df_path = df_path
self.creation_date = getmtime(df_path)
self.root_path = dirname(df_path)
self.df = df if df is not None else pd.read_csv(df_path)
        elif df is not None:  # bool(DataFrame) is ambiguous, so compare against None explicitly
self.df = df
@property
def entries(self):
return self.create_entries(self.df)
def create_entries(self, df):
for (entry_id, subset, lang, wav), df in df.groupby(['entry_id', 'subset', 'language', 'audio_file']):
df = df.loc[:, ('start_frame', 'end_frame', 'duration', 'transcript', 'language', 'numeric')]
yield CorpusEntry(entry_id, self, subset, lang, wav, df)
def __iter__(self):
for corpus_entry in self.entries:
yield corpus_entry
def __getitem__(self, val):
# access by index
if isinstance(val, int):
return next(islice(self.entries, val, val + 1))
if isinstance(val, slice):
return list(islice(self.entries, val.start, val.stop))
# access by id
if isinstance(val, str):
return next(iter([entry for entry in self.entries if entry.id == val]), None)
return None
def __len__(self):
return len(self.df.index)
def __call__(self, *args, **kwargs):
languages = kwargs['languages'].split(',') if 'languages' in kwargs else self.languages
print(f'filtering languages={languages}')
df = self.df[self.df['language'].isin(languages)]
numeric = kwargs['numeric'] if 'numeric' in kwargs else False
if numeric is False:
print(f'filtering out speech segments with numbers in transcript')
df = df[df['numeric'] == False]
self.__init__(self.df_path, df)
return self
@property
def name(self):
languages = ', '.join(self.languages)
return self._name + f' (languages: {languages})'
@property
def languages(self):
return self.df['language'].unique().tolist()
@property
def keys(self):
return self.df['entry_id'].unique().tolist()
def train_set(self, numeric=False):
return self._get_segments_for_subset('train', numeric)
def dev_set(self, numeric=False):
return self._get_segments_for_subset('dev', numeric)
def test_set(self, numeric=False):
return self._get_segments_for_subset('test', numeric)
def _get_segments_for_subset(self, subset, numeric):
df_subset = self._filter_segments(subset, numeric)
if numeric is True:
return [segment for entry in self.create_entries(df_subset) for segment in entry.segments_numeric]
elif numeric is False:
return [segment for entry in self.create_entries(df_subset) for segment in entry.segments_not_numeric]
return [segment for entry in self.create_entries(df_subset) for segment in entry.segments]
def _filter_segments(self, subset, numeric=None):
df_subset = self.df[self.df['subset'] == subset]
if numeric is None:
return df_subset
return df_subset[df_subset['numeric'] == numeric]
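    # Illustrative usage (the subclass name and index path are assumptions, not part
    # of this module):
    #   corpus = SomeCorpus('ls', 'LibriSpeech', df_path='/data/ls/index.csv')
    #   corpus(languages='en', numeric=False)   # filter in place via __call__
    #   train_segments = corpus.train_set()
    #   corpus.summary()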
def summary(self, html=False):
print(f"""
Corpus: {self.name}
Root: {self.root_path}
Index: {self.df_path}
Creation date: {ctime(self.creation_date)}
# entries: {len(self)}
""")
def abs_perc_string(value, total, unit=None):
percent = 100 * value / total if total > 0 else 0
if unit == 's':
value = timedelta(seconds=value)
else:
value = f'{value:,}'
return f'{value} ({percent:.2f}%)'
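            # e.g. abs_perc_string(30, 120) -> '30 (25.00%)'
            #      abs_perc_string(90, 3600, unit='s') -> '0:01:30 (2.50%)'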
def create_row(df, n_total, s_total):
df_train = df[df['subset'] == 'train']
df_dev = df[df['subset'] == 'dev']
df_test = df[df['subset'] == 'test']
n_all = abs_perc_string(len(df), n_total) if len(df) else '-'
n_train = abs_perc_string(len(df_train), n_total) if len(df_train) else '-'
n_dev = abs_perc_string(len(df_dev), n_total) if len(df_dev) else '-'
n_test = abs_perc_string(len(df_test), n_total) if len(df_test) else '-'
s_all = df['duration'].sum()
s_train = df_train['duration'].sum()
s_dev = df_dev['duration'].sum()
s_test = df_test['duration'].sum()
audio_all = abs_perc_string(s_all, s_total, unit='s') if s_all else '-'
audio_train = abs_perc_string(s_train, s_total, unit='s') if s_train else '-'
audio_dev = abs_perc_string(s_dev, s_total, unit='s') if s_dev else '-'
audio_test = abs_perc_string(s_test, s_total, unit='s') if s_test else '-'
audio_all_av = timedelta(seconds=df['duration'].mean()) if df['duration'].any() else '-'
audio_train_av = timedelta(seconds=df_train['duration'].mean()) if df_train['duration'].any() else '-'
audio_dev_av = timedelta(seconds=df_dev['duration'].mean()) if df_dev['duration'].any() else '-'
audio_test_av = timedelta(seconds=df_test['duration'].mean()) if df_test['duration'].any() else '-'
trans_all_av = f"{df['transcript'].map(len).mean():.2f}" if df['transcript'].any() else '-'
trans_train_av = f"{df_train['transcript'].map(len).mean():.2f}" if df['transcript'].any() else '-'
trans_dev_av = f"{df_dev['transcript'].map(len).mean():.2f}" if df['transcript'].any() else '-'
trans_test_av = f"{df_test['transcript'].map(len).mean():.2f}" if df['transcript'].any() else '-'
return [
n_all, n_train, n_dev, n_test,
audio_all, audio_train, audio_dev, audio_test,
str(audio_all_av), str(audio_train_av), str(audio_dev_av), str(audio_test_av),
trans_all_av, trans_train_av, trans_dev_av, trans_test_av
]
data = []
languages = self.languages + [None] if len(self.languages) > 1 else self.languages
for lang, numeric, in product(languages, [None, True, False]):
df = self.df
if lang:
df = df[df['language'] == lang]
n_total = len(df)
s_total = df['duration'].sum()
if numeric is not None:
df = df[df['numeric'] == numeric]
data.append(create_row(df, n_total=n_total, s_total=s_total))
languages = self.languages + ['all'] if len(self.languages) > 1 else self.languages
index = pd.MultiIndex.from_product([languages, ['all', 'numeric', 'non-numeric']])
columns = pd.MultiIndex.from_product([['samples', 'audio', 'Ø audio', 'Ø transcript'],
['total', 'train', 'dev', 'test']])
        df_stats = pd.DataFrame(data=data, index=index, columns=columns)
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
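        # e.g. the first expected value is 100. * (15. / 175.) ** (1.15 - 1) ~= 69.176,
        # using the ld50/tw/mineau/aw inputs assigned below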
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
            trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
        metrics = pd.concat([metrics, new_df])
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial, wraps
import os
import signal
import subprocess
import sys
import types
from typing import Callable, Iterable, Iterator, List, Mapping, Optional, Tuple, TypeVar, Union
import humanize
from more_itertools import flatten, one, unique_everseen, windowed
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from potoo import debug_print
import potoo.numpy
from potoo.util import get_cols, get_rows, or_else
# Convenient shorthands for interactive use -- not recommended for durable code that needs to be read and maintained
DF = pd.DataFrame
S = pd.Series
X = TypeVar('X')
#
# Global options
#
# Mutate these for manual control
# - https://pandas.pydata.org/pandas-docs/stable/options.html
# - TODO In ipykernel you have to manually set_display() after changing any of these
# - Workaround: use pd.set_option for the display_* settings
ipykernel_display_max_rows = 1000 # For pd df output
ipykernel_display_width = 10000 # For pd df output
ipykernel_lines = 75 # Does this affect anything?
ipykernel_columns = 120 # For ipython pretty printing (not dfs)
display_width = 0 # Default: 80; 0 means use get_terminal_size, ''/None means unlimited
display_max_rows = 0 # Default: 60; 0 means use get_terminal_size, ''/None means unlimited
display_max_columns = 250 # Default: 20
display_max_colwidth = lambda cols: 200 # Default: 50; go big for dense bq cells
display_precision = 3 # Default: 6; better magic than _float_format
def set_display_max_colwidth(x=display_max_colwidth):
global display_max_colwidth
if isinstance(x, types.FunctionType):
display_max_colwidth = x
elif isinstance(x, float):
display_max_colwidth = lambda cols: int(cols * x)
elif isinstance(x, int):
display_max_colwidth = lambda cols: x
return display_max_colwidth
def set_display_precision(x=display_precision):
global display_precision
display_precision = x
return display_precision
def set_display(**kwargs):
"Make everything nice"
# XXX I couldn't find a way to make auto-detect work with both ipython (terminal) + ipykernel (atom)
# # Unset $LINES + $COLUMNS so pandas will detect changes in terminal size after process start
# # - https://github.com/pandas-dev/pandas/blob/473a7f3/pandas/io/formats/terminal.py#L32-L33
# # - https://github.com/python/cpython/blob/7028e59/Lib/shutil.py#L1071-L1079
# # - TODO These used to be '' instead of del. Revert back if this change causes problems.
# os.environ.pop('LINES', None)
# os.environ.pop('COLUMNS', None)
# HACK This is all horrible and I hate it. After much trial and error I settled on this as a way to make both
# ipython (terminal) and ipykernel (atom) work.
try:
size = os.get_terminal_size(sys.__stdout__.fileno()) # [TODO Why didn't I use shutil.get_terminal_size here?]
except OSError:
# If ipykernel
lines = ipykernel_lines
columns = ipykernel_columns
_display_width = display_width or ipykernel_display_width or columns
_display_max_rows = display_max_rows or ipykernel_display_max_rows or lines
else:
# If terminal
lines = size.lines - 8
columns = size.columns
_display_width = display_width or columns
_display_max_rows = display_max_rows or lines
# Let kwargs override any of these params that we just inferred
lines = kwargs.get('lines', lines)
columns = kwargs.get('columns', columns)
_display_width = kwargs.get('_display_width', _display_width)
_display_max_rows = kwargs.get('_display_max_rows', _display_max_rows)
# For ipython pretty printing (not dfs)
os.environ['LINES'] = str(lines)
os.environ['COLUMNS'] = str(columns)
potoo.numpy.set_display()
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.set_option.html
# - TODO Any good way to %page by default?
# - here: pd.set_option('display.width', 10000)
# - repl: pd.DataFrame({i:range(100) for i in range(100)})
pd.set_option('display.width', _display_width)
pd.set_option('display.max_rows', _display_max_rows)
pd.set_option('display.max_columns', display_max_columns)
pd.set_option('display.max_colwidth', display_max_colwidth(get_cols()))
pd.set_option('display.precision', display_precision) # Default: 6; better magic than _float_format
# pd.set_option('display._float_format', _float_format(10, 3)) # Default: magic in pandas.formats.format
def set_display_on_sigwinch():
"set_display on window change (SIGWINCH)"
signal.signal(signal.SIGWINCH, lambda sig, frame: set_display())
set_display() # And ensure it's set to begin with
# TODO Check out `with pd.option_context`
@contextmanager
def with_options(options):
saved = {}
for k, v in options.items():
saved[k] = pd.get_option(k)
pd.set_option(k, v)
try:
yield
finally:
for k, v in saved.items():
pd.set_option(k, v)
#
# Utils
#
# display() within a df pipeline, e.g.
#
# df2 = (df
# .pipe(df_display, ...)
# ...
# )
#
def df_display(df, *xs: any):
if not xs:
xs = [lambda df: df]
for x in xs:
if hasattr(x, '__call__'):
x = x(df)
if isinstance(x, str):
# print(x)
display({'text/plain': x}, raw=True) # display() instead of print() to match flush behavior
else:
if not isinstance(x, tuple):
x = (x,)
display(*x) # Less reliable flush, e.g. for single-line strs (which don't make it here), and maybe others...
# ipy_print(*x) # Forces text/plain instead of text/html (e.g. df colors and spacing)
return df
def quantiles(x, q=4, **kwargs):
(_x_labeled, bins) = pd.qcut(
x,
q=q,
retbins=True, # Return bins as extra output
**{
'duplicates': 'drop', 'labels': False, # Return shorter list (e.g. [0]) i/o throwing when bin edges aren't unique
**kwargs,
},
)
return bins
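# Usage sketch (hedged; the toy series below is illustrative only, not part of this module):
#   quantiles(pd.Series([1, 2, 3, 4, 5, 6, 7, 8]), q=4)
#   # -> array of bin edges, e.g. [1., 2.75, 4.5, 6.25, 8.]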
def df_rows(df) -> Iterator['Row']:
"""Shorthand for a very common idiom"""
return (row for i, row in df.iterrows())
def df_map_rows(df, f: Callable[['Row'], 'Row'], *args, **kwargs) -> pd.DataFrame:
"""Shorthand for a very common idiom"""
return df.apply(axis=1, func=f, *args, **kwargs)
def series_assign(s: pd.Series, **kwargs) -> pd.Series:
"""Like df.assign but for Series"""
s = s.copy()
for k, v in kwargs.items():
s.at[k] = v if not callable(v) else v(s.at[k])
return s
def df_assign_first(df, **kwargs) -> pd.DataFrame:
"""Like df.assign but also reposition the assigned cols to be first"""
return (df
.assign(**kwargs)
.pipe(df_reorder_cols, first=kwargs.keys())
)
def df_map_col(df, **kwargs) -> pd.DataFrame:
"""
Map col values by the given function
- A shorthand for a very common usage of df.assign / df.col.map
"""
return df.assign(**{
c: df[c].map(f)
for c, f in kwargs.items()
})
# XXX Deprecated: remove after updating callers
def df_col_map(*args, **kwargs) -> pd.DataFrame:
return df_map_col(*args, **kwargs)
# Based on https://github.com/pandas-dev/pandas/issues/8517#issuecomment-247785821
# - Not very performant, use sparingly...
def df_flatmap(df: pd.DataFrame, f: Callable[['Row'], Union[pd.DataFrame, Iterable['Row']]]) -> pd.DataFrame:
return pd.DataFrame(
OrderedDict(row_out)
for _, row_in in df.iterrows()
for f_out in [f(row_in)]
for row_out in (
(row_out for i, row_out in f_out.iterrows()) if isinstance(f_out, pd.DataFrame) else
f_out
)
)
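# Usage sketch (hedged; toy data for illustration only):
#   df = pd.DataFrame({'k': ['a', 'b'], 'n': [2, 1]})
#   df_flatmap(df, lambda row: [row] * row['n'])  # repeats each row n times -> 3 rows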
def df_summary(
df: Union[pd.DataFrame, pd.Series], # A df, or a series that will be coerced into a 1-col df
n=10, # Show first n rows of df (0 for none, None for all)
k=10, # Show random k rows of df (0 for none, None for all)
random_state=None, # For df.sample
# Summaries that might have a different dtype than the column they summarize (e.g. count, mean)
stats=[
# Use dtype.name (str) instead of dtype (complicated object that causes trouble)
('dtype', lambda df: [dtype.name for dtype in df.dtypes]),
# ('sizeof', lambda df: _sizeof_df_cols(df).map(partial(humanize.naturalsize, binary=True))),
('sizeof', lambda df: _sizeof_df_cols(df)),
('len', lambda df: len(df)),
# 'count', # Prefer isnull (= len - count)
('isnull', lambda df: df.isnull().sum()),
# df.apply + or_else to handle unhashable types
('nunique', lambda df: df.apply(lambda c: or_else(np.nan, lambda: c.nunique()))),
# df.apply + or_else these else they subset the cols to just the numerics, which quietly messes up col ordering
# - dtype.base else 'category' dtypes break np.issubdtype [https://github.com/pandas-dev/pandas/issues/9581]
('mean', lambda df: df.apply(func=lambda c: c.mean() if np.issubdtype(c.dtype.base, np.number) else np.nan)),
('std', lambda df: df.apply(func=lambda c: c.std() if np.issubdtype(c.dtype.base, np.number) else np.nan)),
],
# Summaries that have the same dtype as the column they summarize (e.g. quantile values)
prototypes=[
('min', lambda df: _df_quantile(df, 0, interpolation='lower')),
('25%', lambda df: _df_quantile(df, .25, interpolation='lower')),
('50%', lambda df: _df_quantile(df, .5, interpolation='lower')),
('75%', lambda df: _df_quantile(df, .75, interpolation='lower')),
('max', lambda df: _df_quantile(df, 1, interpolation='higher')),
],
):
"""A more flexible version of df.describe, with more information by default"""
# Coerce series to df
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
# Surface non-default indexes as cols in stats
if not df.index.identical(pd.RangeIndex(len(df))):
try:
df = df.reset_index() # Surface indexes as cols in stats
except:
# Oops, index is already a col [`drop=df.index.name in df.columns` is unreliable b/c df.index.names ...]
df = df.reset_index(drop=True)
stats = [(f, lambda df, f=f: getattr(df, f)()) if isinstance(f, str) else f for f in stats]
prototypes = [(f, lambda df, f=f: getattr(df, f)()) if isinstance(f, str) else f for f in prototypes]
return (
# Make a df from: stats + prototypes + first n rows + random k rows
pd.concat([
pd.DataFrame(OrderedDict({k: f(df) for k, f in stats + prototypes})).T,
df[:n],
df[n:].sample(
n=min(k, len(df[n:])),
replace=False,
random_state=random_state,
).sort_index(),
])
# Reorder cols to match input (some aggs like mean/std throw out non-numeric cols, which messes up order)
[df.columns]
# Pull stats up into col index, so that our col dtypes can match the input col dtypes
# - [Added later] Also prototypes, to separate from df[:n] rows
# - FIXME dtypes get mixed up (e.g. False/0) in the transpose
# - WARNING Seems deeply rooted -- coercing each index value wasn't sufficient to fix, via:
# MultiIndex.map(lambda (k, *xs): (k, *tuple(df[k].dtype.type(x) for x in xs)))
.T.set_index([k for k, f in stats + prototypes], append=True).T
# Transpose for fixed width (stats) and variable height (input cols)
# - [Nope: transposing cols mixes dtypes such that mixed str/int/float undermines display.precision smarts]
# .T
)
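# Usage sketch (hedged; illustrative only):
#   df_summary(pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'b']}))
#   # -> stats rows (dtype, sizeof, len, isnull, nunique, mean, std, min..max)
#   #    stacked above the first n rows and a random k rows of the input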
def _df_quantile(df, q=.5, interpolation='linear'):
"""Like pd.DataFrame.quantile but handles ordered categoricals"""
return df.apply(lambda c: _series_quantile(c, q=q, interpolation=interpolation))
def _series_quantile(s, *args, **kwargs):
"""Like pd.Series.quantile but handles ordered categoricals"""
if s.dtype.name == 'category':
cat_code = s.cat.codes.quantile(*args, **kwargs)
return s.dtype.categories[cat_code] if cat_code != -1 else None
else:
try:
return s.quantile(*args, **kwargs)
except:
# e.g. a column of non-uniform np.array's will fail like:
# ValueError: operands could not be broadcast together with shapes (6599624,) (459648,)
return np.nan
def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
return df.memory_usage(index=False, deep=True)
# XXX Looks like df.memory_usage(deep=True) is more accurate (previous attempts were missing deep=True)
# def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
# """
# sizeof is hard, but make our best effort:
# - Use dask.sizeof.sizeof instead of sys.getsizeof, since the latter is unreliable for pandas/numpy objects
# - Use df.applymap, since dask.sizeof.sizeof appears to not do this right [why? seems wrong...]
# """
# try:
# import dask.sizeof
# except:
# return df.apply(lambda c: None)
# else:
# return df.applymap(dask.sizeof.sizeof).sum()
def df_value_counts(
df: pd.DataFrame,
exprs=None, # Cols to surface, as expressions understood by df.eval(expr) (default: df.columns)
limit=10, # Limit rows
exclude_max_n=1, # Exclude cols where max n ≤ exclude_max_n
fillna='', # Fill na cells (for seeing); pass None to leave na cols as NaN (for processing)
unique_names=False, # Give all cols unique names (for processing) instead of reusing 'n' (for seeing)
**kwargs, # kwargs for .value_counts (e.g. dropna)
) -> pd.DataFrame:
"""Series.value_counts() extended over a whole DataFrame (with a few compromises in hygiene)"""
exprs = exprs if exprs is not None else df.columns
return (df
.pipe(df_remove_unused_categories)
.pipe(df_cat_to_str)
.pipe(lambda df: (pd.concat(axis=1, objs=[
ns
for expr_opts in exprs
for expr, opts in [expr_opts if isinstance(expr_opts, tuple) else (expr_opts, dict())]
for ns in [(df
.eval(expr)
.value_counts(**kwargs)
)]
if ns.iloc[0] > exclude_max_n
for ns in [(ns
.pipe(lambda s: (
# NOTE We "sort_index" when "sort_values=True" because the "values" are in the index, as opposed to
# the "counts", which are the default sort
s.sort_values(ascending=opts.get('ascending', False)) if not opts.get('sort_values') else
s.sort_index(ascending=opts.get('ascending', True))
))
.iloc[:limit]
.to_frame()
.rename(columns=lambda x: f'n_{expr}' if unique_names else 'n')
.reset_index()
.rename(columns={'index': expr})
)]
])))
.fillna(fillna)
)
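# Usage sketch (hedged; toy data for illustration only):
#   df_value_counts(pd.DataFrame({'x': ['a', 'a', 'b'], 'y': [1, 1, 1]}), limit=5)
#   # -> one (value, n) column pair per input col, e.g. x/n and y/n, top 5 values each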
def df_reorder_cols(df: pd.DataFrame, first: List[str] = [], last: List[str] = []) -> pd.DataFrame:
first_last = set(first) | set(last)
return df.reindex(columns=list(first) + [c for c in df.columns if c not in first_last] + list(last))
def df_transform_columns(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
df = df.copy()
df.columns = f(df.columns)
return df
def df_transform_column_names(df: pd.DataFrame, f: Callable[[str], str]) -> pd.DataFrame:
return df_transform_columns(df, lambda cs: [f(c) for c in df.columns])
def df_transform_index(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
df = df.copy()
df.index = f(df.index)
return df
def df_set_index_name(df: pd.DataFrame, name: str) -> pd.DataFrame:
return df_transform_index(df, lambda index: index.rename(name))
def df_remove_unused_categories(df: pd.DataFrame) -> pd.DataFrame:
"""
Do col.remove_unused_categories() for all categorical columns
"""
return df.assign(**{
k: df[k].cat.remove_unused_categories()
for k in df.columns
if df[k].dtype.name == 'category'
})
def df_ordered_cats_like(df: pd.DataFrame, **col_names_to_cats) -> pd.DataFrame:
"""
More flexible than df.astype({'foo': cat_dtype, ...}) / df_ordered_cat(df, ...)
- In addition to cat dtypes, allows cols with cat dtype, lists of cat values, and functions that return any of those
- Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
"""
return (df
.assign(**{
col_name: df[col_name].pipe(as_ordered_cat_like, cats)
for col_name, cats in col_names_to_cats.items()
})
)
def as_ordered_cat_like(s: pd.Series, cats) -> pd.Series:
"""
More flexible than s.astype(cat_dtype) / as_ordered_cat(s, cat_values)
- In addition to cat dtypes, allows cols with cat dtype, lists of cat values, and functions that return any of those
- Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
"""
# Allow functions (of the input col)
if callable(cats):
cats = cats(s)
# Allow cols with categorical dtype
# - Fail on cols with non-categorical dtype
if isinstance(cats, pd.Series):
cats = cats.dtypes.categories
# Allow categorical dtypes
# - TODO Is there a robust way to isinstance(cats, [np.dtype, pd.dtype]) so we can fail on non-categorical dtypes?
if isinstance(cats, pd.api.types.CategoricalDtype):
cats = cats.categories
# At this point cats should be an iter of cat values
# - Dedupe them for the user, since CategoricalDtype rejects duplicate cat values
return as_ordered_cat(
s,
ordered_cats=list(unique_everseen(cats)),
)
# XXX Try migrating callers to df_ordered_cats_like to see if we can kill this less-usable one
# FIXME Multiple *args appears broken: `.pipe(df_ordered_cat, 'x', 'y')`
# - Workaround: `.pipe(df_ordered_cat, 'x').pipe(df_ordered_cat, 'y')`
def df_ordered_cat(df: pd.DataFrame, *args, transform=lambda x: x, **kwargs) -> pd.DataFrame:
"""
Map many str series to ordered category series
"""
cats = dict(
**{k: lambda df: df[k].unique() for k in args},
**kwargs,
)
return df.assign(**{
k: as_ordered_cat(df[k], list(transform(
x(df) if isinstance(x, types.FunctionType) else x
)))
for k, x in cats.items()
})
def as_ordered_cat(s: pd.Series, ordered_cats: List[str] = None) -> pd.Series:
"""
Map a str series to an ordered category series
- If ordered_cats isn't given, s.unique() is used
"""
return s.astype(CategoricalDtype(ordered_cats or list(s.unique()), ordered=True))
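# Usage sketch (hedged; illustrative only):
#   s = pd.Series(['low', 'high', 'low'])
#   as_ordered_cat(s, ['low', 'mid', 'high']).dtype  # -> ordered CategoricalDtype low < mid < high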
def df_cat_to_str(df: pd.DataFrame) -> pd.DataFrame:
"""
Map any categorical columns to str columns (see cat_to_str for details)
"""
return df.apply(cat_to_str, axis=0)
def cat_to_str(s: pd.Series) -> pd.Series:
"""
If s is a category dtype, map it to a str. This is useful when you want to avoid bottlenecks on large cats:
- s.apply(f) will apply f to each value in s _and_ each value in the category, to make the new output category dtype
- cat_to_str(s).apply(f) will apply f only to each value in s, since there's no output category dtype to compute
"""
return s.astype('str') if s.dtype.name == 'category' else s
# XXX after migrating callers to new name
def df_reverse_cat(*args, **kwargs):
return df_reverse_cats(*args, **kwargs)
def df_reverse_cats(df: pd.DataFrame, *col_names) -> pd.DataFrame:
"""
Reverse the cat.categories values of each (ordered) category column given in col_names
- Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
"""
return df_transform_cats(df, **{
col_name: reversed
for col_name in col_names
})
def df_transform_cats(
df: pd.DataFrame,
**col_name_to_f,
) -> pd.DataFrame:
"""
Transform the cat.categories values to f(cat.categories) for each category column given in col_names
"""
return df.assign(**{col_name: transform_cat(df[col_name], f=f) for col_name, f in col_name_to_f.items()})
def transform_cat(
s: pd.Series,
f: Callable[[List[str]], Iterable[str]] = lambda xs: xs,
ordered: bool = None,
) -> pd.Series:
"""
Transform the category values of a categorical series
"""
return s.astype('str').astype(CategoricalDtype(
categories=list(f(s.dtype.categories)),
ordered=ordered if ordered is not None else s.dtype.ordered,
))
def reverse_cat(s: pd.Series) -> pd.Series:
"""
Reverse the category values of a categorical series
- Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
"""
return transform_cat(s, reversed)
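# Usage sketch (hedged; illustrative only):
#   s = as_ordered_cat(pd.Series(['a', 'b']), ['a', 'b'])
#   reverse_cat(s).dtype.categories  # -> ['b', 'a'], e.g. to flip a plotnine axis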
def df_ensure(df, **kwargs):
"""
df.assign only the columns that aren't already present
"""
return df.assign(**{
k: v
for k, v in kwargs.items()
if k not in df
})
def df_require_nonempty(df, e: Union[str, Exception]) -> pd.DataFrame:
"""
Raise if df is empty, else return df. Useful in pipelines, e.g.
(df
...
.pipe(df_require_nonempty, f'No requested things found: x[{x}], y[{y}]') # -> ValueError
...
.pipe(df_require_nonempty, AssertionError(f'Oops, my fault'))
...
)
"""
if df.empty:
if isinstance(e, str):
e = ValueError(e)
raise e
return df
# XXX Obviated by df_ensure?
# def produces_cols(*cols):
# cols = [c for c in cols if c != ...]
# def decorator(f):
# @wraps(f)
# def g(*args, **kwargs) -> pd.DataFrame:
# df = _find_df_in_args(*args, **kwargs)
# _refresh = kwargs.pop('_refresh', False)
# if _refresh or not cols or any(c not in df for c in cols):
# df = f(*args, **kwargs)
# return df
# return g
# return decorator
def requires_cols(*required_cols):
required_cols = [c for c in required_cols if c != ...]
def decorator(f):
@wraps(f)
def g(*args, **kwargs) -> any:
input = _find_first_df_or_series_in_args(*args, **kwargs)
input_cols = input.columns if isinstance(input, pd.DataFrame) else input.index # df.columns or series.index
if not set(required_cols) <= set(input_cols):
raise ValueError(f'requires_col: required_cols[{required_cols}] not all in input_cols[{input_cols}]')
return f(*args, **kwargs)
return g
return decorator
def _find_first_df_or_series_in_args(*args, **kwargs):
for x in [*args, *kwargs.values()]:
if isinstance(x, (pd.DataFrame, pd.Series)):
return x
else:
raise ValueError('No df or series found in args')
def requires_nonempty_rows(f):
@wraps(f)
def g(*args, **kwargs) -> any:
input = _find_first_df_or_series_in_args(*args, **kwargs)
if input.empty:
raise ValueError(f'requires_nonempty_rows: rows are empty ({input})')
return f(*args, **kwargs)
return g
def df_require_index_is_trivial(df: pd.DataFrame) -> pd.DataFrame:
require_index_is_trivial(df.index)
return df
def require_index_is_trivial(index: pd.Index) -> pd.Index:
pd.testing.assert_index_equal(index, pd.RangeIndex(len(index)))
return index
def df_style_cell(*styles: Union[
Tuple[Callable[['cell'], bool], 'style'],
Tuple['cell', 'style'],
Callable[['cell'], Optional['style']],
]) -> Callable[['cell'], 'style']:
"""
Shorthand for df.style.applymap(...). Example usage:
df.style.applymap(df_style_cell(
(lambda x: 0 < x < 1, 'color: red'),
(0, 'color: green'),
lambda x: 'background: %s' % to_rgb_hex(x),
))
"""
def f(x):
y = None
for style in styles:
if isinstance(style, tuple) and isinstance(style[0], types.FunctionType) and style[0](x):
y = style[1]
elif isinstance(style, tuple) and x == style[0]:
y = style[1]
elif isinstance(style, types.FunctionType):
y = style(x)
if y:
break
return y or ''
return f
#
# io
#
def pd_read_fwf(
filepath_or_buffer,
widths: Optional[Union[List[int], 'infer']] = None,
unused_char='\a', # For widths='infer': any char not present in the file, used to initially parse raw lines
**kwargs,
) -> pd.DataFrame:
"""
Like pd.read_fwf, except:
- Add support for widths='infer', which infers col widths from the header row (assuming no spaces in header names)
"""
if widths == 'infer':
# Read raw lines
# - Use pd.read_* (with kwargs) so we get all the file/str/compression/encoding goodies
[header_line, *body_lines] = (
            pd.read_csv(filepath_or_buffer, **kwargs, header=None, sep=unused_char)
"""
Import as:
import core.statistics as cstati
"""
import collections
import datetime
import functools
import logging
import math
import numbers
from typing import Any, Iterable, List, Optional, Tuple, Union, cast
import numpy as np
import pandas as pd
import scipy as sp
import sklearn.model_selection
import statsmodels
import statsmodels.api as sm
import core.finance as cfinan
import core.signal_processing as csigna
import helpers.dataframe as hdataf
import helpers.dbg as dbg
_LOG = logging.getLogger(__name__)
# #############################################################################
# Sampling statistics: start, end, frequency, NaNs, infs, etc.
# #############################################################################
def summarize_time_index_info(
srs: pd.Series,
nan_mode: Optional[str] = None,
prefix: Optional[str] = None,
) -> pd.Series:
"""
Return summarized information about datetime index of the input.
:param srs: pandas series of floats
:param nan_mode: argument for hdataf.apply_nan_mode()
:param prefix: optional prefix for output's index
:return: series with information about input's index
"""
dbg.dassert_isinstance(srs, pd.Series)
nan_mode = nan_mode or "drop"
prefix = prefix or ""
original_index = srs.index
# Assert that input series has a sorted datetime index.
dbg.dassert_isinstance(original_index, pd.DatetimeIndex)
dbg.dassert_strictly_increasing_index(original_index)
freq = original_index.freq
clear_srs = hdataf.apply_nan_mode(srs, mode=nan_mode)
clear_index = clear_srs.index
result = {}
if clear_srs.empty:
_LOG.warning("Empty input series `%s`", srs.name)
result["start_time"] = np.nan
result["end_time"] = np.nan
else:
result["start_time"] = clear_index[0]
result["end_time"] = clear_index[-1]
result["n_sampling_points"] = len(clear_index)
result["frequency"] = freq
if freq is None:
sampling_points_per_year = clear_srs.resample("Y").count().mean()
else:
sampling_points_per_year = hdataf.compute_points_per_year_for_given_freq(
freq
)
result["sampling_points_per_year"] = sampling_points_per_year
# Compute input time span as a number of `freq` units in
# `clear_index`.
if not clear_srs.empty:
if freq is None:
clear_index_time_span = (clear_index[-1] - clear_index[0]).days
sampling_points_per_year = (
hdataf.compute_points_per_year_for_given_freq("D")
)
else:
clear_index_time_span = len(srs[clear_index[0] : clear_index[-1]])
else:
clear_index_time_span = 0
result["time_span_in_years"] = (
clear_index_time_span / sampling_points_per_year
)
result = pd.Series(result, dtype="object")
result.index = prefix + result.index
return result
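# Usage sketch (hedged; toy series for illustration only):
#   srs = pd.Series(range(10), index=pd.date_range("2020-01-01", periods=10, freq="D"))
#   summarize_time_index_info(srs, prefix="idx_")
#   # -> Series with idx_start_time, idx_end_time, idx_n_sampling_points,
#   #    idx_frequency, idx_sampling_points_per_year, idx_time_span_in_years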
def compute_special_value_stats(
srs: pd.Series,
prefix: Optional[str] = None,
) -> pd.Series:
"""
Calculate special value statistics in time series.
:param srs: pandas series of floats
:param prefix: optional prefix for metrics' outcome
:return: series of statistics
"""
prefix = prefix or ""
dbg.dassert_isinstance(srs, pd.Series)
result_index = [
prefix + "n_rows",
prefix + "frac_zero",
prefix + "frac_nan",
prefix + "frac_inf",
prefix + "frac_constant",
prefix + "num_finite_samples",
prefix + "num_unique_values",
]
nan_result = pd.Series(np.nan, index=result_index, name=srs.name)
if srs.empty:
_LOG.warning("Empty input series `%s`", srs.name)
return nan_result
result_values = [
len(srs),
compute_frac_zero(srs),
compute_frac_nan(srs),
compute_frac_inf(srs),
compute_zero_diff_proportion(srs).iloc[1],
count_num_finite_samples(srs),
count_num_unique_values(srs),
]
result = pd.Series(data=result_values, index=result_index, name=srs.name)
return result
# TODO(*): Move this function out of this library.
def replace_infs_with_nans(
data: Union[pd.Series, pd.DataFrame],
) -> Union[pd.Series, pd.DataFrame]:
"""
Replace infs with nans in a copy of `data`.
"""
if data.empty:
_LOG.warning("Empty input!")
return data.replace([np.inf, -np.inf], np.nan)
def compute_frac_zero(
data: Union[pd.Series, pd.DataFrame],
atol: float = 0.0,
axis: Optional[int] = 0,
) -> Union[float, pd.Series]:
"""
Calculate fraction of zeros in a numerical series or dataframe.
:param data: numeric series or dataframe
:param atol: absolute tolerance, as in `np.isclose`
:param axis: numpy axis for summation
"""
# Create an ndarray of zeros of the same shape.
zeros = np.zeros(data.shape)
# Compare values of `df` to `zeros`.
is_close_to_zero = np.isclose(data.values, zeros, atol=atol)
num_zeros = is_close_to_zero.sum(axis=axis)
return _compute_denominator_and_package(num_zeros, data, axis)
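# Usage sketch (hedged; illustrative only):
#   compute_frac_zero(pd.Series([0.0, 1.0, 0.0, 2.0]))  # -> 0.5 (half of the values are zero)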
def compute_frac_nan(
data: Union[pd.Series, pd.DataFrame], axis: Optional[int] = 0
) -> Union[float, pd.Series]:
"""
Calculate fraction of nans in `data`.
:param data: numeric series or dataframe
:param axis: numpy axis for summation
"""
num_nans = data.isna().values.sum(axis=axis)
return _compute_denominator_and_package(num_nans, data, axis)
def compute_frac_inf(
data: Union[pd.Series, pd.DataFrame], axis: Optional[int] = 0
) -> Union[float, pd.Series]:
"""
Count fraction of infs in a numerical series or dataframe.
:param data: numeric series or dataframe
:param axis: numpy axis for summation
"""
num_infs = np.isinf(data.values).sum(axis=axis)
return _compute_denominator_and_package(num_infs, data, axis)
# TODO(Paul): Refactor to work with dataframes as well. Consider how to handle
# `axis`, which the pd.Series version of `copy()` does not take.
def count_num_finite_samples(data: pd.Series) -> Union[int, float]:
"""
Count number of finite data points in a given time series.
:param data: numeric series or dataframe
"""
if data.empty:
_LOG.warning("Empty input series `%s`", data.name)
return np.nan # type: ignore
data = data.copy()
data = replace_infs_with_nans(data)
ret = data.count()
ret = cast(int, ret)
return ret
# TODO(Paul): Extend to dataframes.
def count_num_unique_values(data: pd.Series) -> Union[int, float]:
"""
Count number of unique values in the series.
"""
if data.empty:
_LOG.warning("Empty input series `%s`", data.name)
return np.nan
srs = pd.Series(data=data.unique())
return count_num_finite_samples(srs)
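# Usage sketch (hedged; illustrative only):
#   count_num_unique_values(pd.Series([1, 1, 2, np.inf]))  # -> 2 (inf is treated as non-finite)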
def compute_zero_diff_proportion(
srs: pd.Series,
atol: Optional[float] = None,
rtol: Optional[float] = None,
nan_mode: Optional[str] = None,
prefix: Optional[str] = None,
) -> pd.Series:
"""
Compute proportion of unvarying periods in a series.
https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
:param srs: pandas series of floats
:param atol: as in numpy.isclose
:param rtol: as in numpy.isclose
:param nan_mode: argument for hdataf.apply_nan_mode()
If `nan_mode` is "leave_unchanged":
- consecutive `NaN`s are not counted as a constant period
- repeated values with `NaN` in between are not counted as a constant
period
If `nan_mode` is "drop":
- the denominator is reduced by the number of `NaN` and `inf` values
- repeated values with `NaN` in between are counted as a constant
period
If `nan_mode` is "ffill":
- consecutive `NaN`s are counted as a constant period
- repeated values with `NaN` in between are counted as a constant
period
:param prefix: optional prefix for metrics' outcome
:return: series with proportion of unvarying periods
"""
dbg.dassert_isinstance(srs, pd.Series)
atol = atol or 0
rtol = rtol or 1e-05
nan_mode = nan_mode or "leave_unchanged"
prefix = prefix or ""
srs = srs.replace([np.inf, -np.inf], np.nan)
data = hdataf.apply_nan_mode(srs, mode=nan_mode)
result_index = [
prefix + "approx_const_count",
prefix + "approx_const_frac",
]
if data.shape[0] < 2:
_LOG.warning(
"Input series `%s` with size '%d' is too small",
srs.name,
data.shape[0],
)
        nan_result = pd.Series(data=np.nan, index=result_index, name=srs.name)
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
################Stage-1: Sentence Level Classification
df_train = pd.read_csv('ngramsTrain.csv',header=None)
df_test = pd.read_csv('ngramsTest.csv',header=None)
#Encoding 9 classes for classification
mapping = {"bn_en_":0,"en_":1,"gu_en_":2,"hi_en_":3,"kn_en_":4,"ml_en_":5,"mr_en_":6,"ta_en_":7,"te_en_":8}
classes = ["bn_en_","en_","gu_en_","hi_en_","kn_en_","ml_en_","mr_en_","ta_en_","te_en_"]
languages = ["bengali","english","gujarati","hindi","kannada","malayalam","marathi","tamil","telugu"]
print("Building Sentence Level Classifier..........")
df_train = df_train.replace(mapping)
df_test = df_test.replace(mapping)
y_train = df_train[0]
x_train = df_train[1]
y_test = df_test[0]
x_test = df_test[1]
cv = CountVectorizer()
cv.fit(x_train)
new_x = cv.transform(x_train)
train_dataset = new_x.toarray()
######Naive Bayes Classifier
nb = MultinomialNB()
nb.fit(train_dataset,y_train)
######MaxEntropy i.e., Multi Class Logistic Regression
lg = LogisticRegression(random_state=0)
lg.fit(train_dataset,y_train)
new_x_test = cv.transform(x_test)
y_pred = nb.predict(new_x_test)
y1_pred = lg.predict(new_x_test)
print("F1 Score of Naive bayes for sentence classifier is ",metrics.accuracy_score(y_test,y_pred))
print("F1 Score of Logistic Regression for sentence classifier is ",metrics.accuracy_score(y_test,y1_pred))
print("Successfully built sentence level classifier......")
####################Stage 2: Building Binary Classifiers
print("\nBuilding Binary Classifiers........")
def ngram_generator(n,word):
i=0
n_grams=''
j=1
while(j<=n):
i=0
while(i<=len(word)-j):
n_grams+=word[i:i+j]+' '
i+=1
j+=1
return n_grams
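# Usage sketch (hedged): builds space-separated character n-grams of lengths 1..n, e.g.
#   ngram_generator(2, "abc")  # -> 'a b c ab bc ' (all 1-grams, then all 2-grams)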
#Constructing 9 dataframes from given language files
en_df = pd.read_csv('eng2.txt',header=None)
lis = []
for i in range(len(en_df)):
lis.append(1)
t = en_df[0]
en_df[0] = lis
en_df[1] = t
te_df = pd.read_csv('telugu.txt',header=None)
for i in range(len(te_df)):
te_df[0][i] = ngram_generator(5,te_df[0][i])
lis = []
for i in range(len(te_df)):
lis.append(8)
t = te_df[0]
te_df[0] = lis
te_df[1] = t
hi_df = pd.read_csv('hindiW.txt',header=None)
# -*- coding: utf-8 -*-
"""Generating the training data.
This script generates the training data according to the config specifications.
Example
-------
To run this script, pass in the desired config file as argument::
$ generate baobab/configs/tdlmc_diagonal_config.py --n_data 1000
"""
import os, sys
import random
import argparse
import gc
from types import SimpleNamespace
from tqdm import tqdm
import numpy as np
import pandas as pd
# Lenstronomy modules
import lenstronomy
print("Lenstronomy path being used: {:s}".format(lenstronomy.__path__[0]))
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.SimulationAPI.data_api import DataAPI
import lenstronomy.Util.util as util
# Baobab modules
from baobab.configs import BaobabConfig
import baobab.bnn_priors as bnn_priors
from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection
def parse_args():
"""Parse command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('config', help='train config file path')
parser.add_argument('--n_data', default=None, dest='n_data', type=int,
help='size of dataset to generate (overrides config file)')
args = parser.parse_args()
# sys.argv rerouting for setuptools entry point
if args is None:
args = SimpleNamespace()
args.config = sys.argv[0]
args.n_data = sys.argv[1]
return args
def main():
args = parse_args()
cfg = BaobabConfig.from_file(args.config)
if args.n_data is not None:
cfg.n_data = args.n_data
# Seed for reproducibility
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# Create data directory
save_dir = cfg.out_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Destination folder path: {:s}".format(save_dir))
print("Log path: {:s}".format(cfg.log_path))
cfg.export_log()
else:
raise OSError("Destination folder already exists.")
# Instantiate PSF models
psf_models = instantiate_PSF_models(cfg.psf, cfg.instrument.pixel_scale)
n_psf = len(psf_models)
# Instantiate density models
kwargs_model = dict(
lens_model_list=[cfg.bnn_omega.lens_mass.profile, cfg.bnn_omega.external_shear.profile],
source_light_model_list=[cfg.bnn_omega.src_light.profile],
)
lens_mass_model = LensModel(lens_model_list=kwargs_model['lens_model_list'])
src_light_model = LightModel(light_model_list=kwargs_model['source_light_model_list'])
lens_eq_solver = LensEquationSolver(lens_mass_model)
lens_light_model = None
ps_model = None
if 'lens_light' in cfg.components:
kwargs_model['lens_light_model_list'] = [cfg.bnn_omega.lens_light.profile]
lens_light_model = LightModel(light_model_list=kwargs_model['lens_light_model_list'])
if 'agn_light' in cfg.components:
kwargs_model['point_source_model_list'] = [cfg.bnn_omega.agn_light.profile]
ps_model = PointSource(point_source_type_list=kwargs_model['point_source_model_list'], fixed_magnification_list=[False])
# Instantiate Selection object
selection = Selection(cfg.selection, cfg.components)
# Initialize BNN prior
bnn_prior = getattr(bnn_priors, cfg.bnn_prior_class)(cfg.bnn_omega, cfg.components)
# Initialize empty metadata dataframe
metadata = pd.DataFrame()
metadata_path = os.path.join(save_dir, 'metadata.csv')
current_idx = 0 # running idx of dataset
pbar = tqdm(total=cfg.n_data)
while current_idx < cfg.n_data:
sample = bnn_prior.sample() # FIXME: sampling in batches
# Selections on sampled parameters
if selection.reject_initial(sample):
continue
psf_model = get_PSF_model(psf_models, n_psf, current_idx)
# Instantiate the image maker data_api with detector and observation conditions
kwargs_detector = util.merge_dicts(cfg.instrument, cfg.bandpass, cfg.observation)
kwargs_detector.update(seeing=cfg.psf.fwhm,
psf_type=cfg.psf.type,
kernel_point_source=psf_model,
background_noise=0.0)
data_api = DataAPI(cfg.image.num_pix, **kwargs_detector)
# Generate the image
img, img_features = generate_image(sample, psf_model, data_api, lens_mass_model, src_light_model, lens_eq_solver, cfg.instrument.pixel_scale, cfg.image.num_pix, cfg.components, cfg.numerics, min_magnification=cfg.selection.magnification.min, lens_light_model=lens_light_model, ps_model=ps_model)
if img is None: # couldn't make the magnification cut
continue
# Save image file
img_filename = 'X_{0:07d}.npy'.format(current_idx)
img_path = os.path.join(save_dir, img_filename)
np.save(img_path, img)
# Save labels
meta = {}
for comp in cfg.components:
for param_name, param_value in sample[comp].items():
meta['{:s}_{:s}'.format(comp, param_name)] = param_value
#if cfg.bnn_prior_class in ['DiagonalCosmoBNNPrior']:
# if cfg.bnn_omega.time_delays.calculate_time_delays:
# # Order time delays in increasing dec
# unordered_td = sample['misc']['true_td'] # np array
# increasing_dec_i = np.argsort(img_features['y_image'])
# td = unordered_td[increasing_dec_i]
# td = td[1:] - td[0] # take BCD - A
# sample['misc']['true_td'] = list(td)
# img_features['x_image'] = img_features['x_image'][increasing_dec_i]
# img_features['y_image'] = img_features['y_image'][increasing_dec_i]
if cfg.bnn_prior_class in ['EmpiricalBNNPrior', 'DiagonalCosmoBNNPrior']:
for misc_name, misc_value in sample['misc'].items():
meta['{:s}'.format(misc_name)] = misc_value
if 'agn_light' in cfg.components:
x_image = np.zeros(4)
y_image = np.zeros(4)
n_img = len(img_features['x_image'])
meta['n_img'] = n_img
x_image[:n_img] = img_features['x_image']
y_image[:n_img] = img_features['y_image']
for i in range(4):
meta['x_image_{:d}'.format(i)] = x_image[i]
meta['y_image_{:d}'.format(i)] = y_image[i]
meta['total_magnification'] = img_features['total_magnification']
meta['img_filename'] = img_filename
metadata = metadata.append(meta, ignore_index=True)
# Export metadata.csv for the first time
if current_idx == 0:
# Sort columns lexicographically
metadata = metadata.reindex(sorted(metadata.columns), axis=1)
# Export to csv
metadata.to_csv(metadata_path, index=None)
# Initialize empty dataframe for next checkpoint chunk
            metadata = pd.DataFrame()
import os
import ujson as json
import pytups as pt
import pandas as pd
import datetime
import numpy as np
def read_json(filename):
with open(filename, 'r') as f:
return json.load(f)
def _treat_stop_area(one_stop):
get_line_time = lambda v: (v['line']['shortName'], v['dateTime'])
return pt.TupList(one_stop['departures']['departure']).\
apply(get_line_time).\
to_dict(1).\
vapply(sorted)
def write_table(timetable, filename):
_test = timetable.apply(lambda v: ','.join(v))
with open( filename ,'w') as f:
f.write('\n'.join(_test))
def generate_timetable():
data_dir = 'data_tisseo/stops_schedules/'
files = os.listdir(data_dir)
_get_name = lambda v: os.path.splitext(v)[0]
files_data = \
pt.TupList(files). \
to_dict(None). \
vapply(_get_name). \
reverse(). \
vapply(lambda v: data_dir + v). \
vapply(read_json)
all_passing = \
files_data. \
vapply(_treat_stop_area). \
to_dictup(). \
to_tuplist()
return all_passing
class Node(object):
# consists of a trip arriving at a stop at a certain time (in a seq)
def __init__(self, trip_id, stop_sequence, info, hops=0):
"""
:param trip_id: the trip (mission, bus)
:param stop_sequence: sequence of the stop in trip
:param info: static information on the line
:param int hops: number of previous transfers before getting into this node
"""
# convert this to integer has a lot to do with R and
# it being unable to pass integer arguments
stop_sequence = int(stop_sequence)
self.seq = stop_sequence
self.trip = trip_id
data = info['stop_times'][trip_id][stop_sequence]
self.stop = data['stop_id']
self.time = data['arrival_time']
self.info = info
self.route = info['trips'][trip_id]['route_id']
self.hash = hash(self.__key())
self.hops = hops
return
def __repr__(self):
route_name = self.info['routes'][self.route]['route_short_name']
stop_name = self.info['stops'][self.stop]['stop_name']
time = self.time.strftime('%H:%M')
return repr('{} @ {} Line:{}'.format(stop_name, time, route_name))
def get_neighbors_in_trip(self, max_time=None):
data = self.info['stop_times'][self.trip]
if max_time is not None:
data = data.clean(func=lambda v: v['arrival_time'] < max_time)
if not data:
return []
last = max(data.keys())
return [Node(self.trip, seq, self.info, hops=self.hops)
for seq in range(self.seq+1, last+1)]
def get_neighbors_in_stop(self, max_time=None, delta_min=10, walk_speed=5):
# we take a look for other trips in the same stop filtered by time (5 min wait?)
# also: they should not share the same route_id.
_max_time = self.time + datetime.timedelta(minutes=delta_min)
if max_time is None:
max_time = _max_time
else:
max_time = min(_max_time, max_time)
# we get the pandas data frame corresponding to that stop or close stops
# and filter it accordingly
neighbors = self.info['stops_neigh'][self.stop]
other_lines = self.info['stop_times_2'].loc[neighbors.keys()]
other_lines['max_time'] = max_time
# the minimum time will depend on the distance to that stop
# other_lines['min_time'] = self.time + datetime.timedelta(minutes=1)
other_lines['min_time_delta'] = other_lines.index.map(neighbors) * 60/walk_speed + 1
        other_lines['min_time'] = self.time + pd.to_timedelta(other_lines.min_time_delta, unit='minute')
import pandas as pd
import numpy as np
from scipy.io import loadmat
from tqdm import tqdm
from noiseceiling.utils import _find_repeats
mat = loadmat('data/raw/AU_data_for_Lukas.mat')
au_names = [n[0] for n in mat['AUnames'][0]]
rename_au = {'AU10Open': 'AU10', 'AU10LOpen': 'AU10L', 'AU10ROpen': 'AU10R', 'AU16Open': 'AU16', 'AU27i': 'AU27'}
au_names = [rename_au[name] if name in rename_au.keys() else name for name in au_names]
emo_names = [e[0] for e in mat['expnames'][0]]
# Extract amplitudes per AU
au_data = mat['data_AUamp']
au_data[np.isnan(au_data)] = 0
au_data_onoff = mat['data_AUon']
# Check whether the on/off data is indeed amplitude > 0
au_data_bin = (au_data > 0).astype(int)
np.testing.assert_array_equal(au_data_bin, au_data_onoff)
# Make sure dimensions match
au_data = np.moveaxis(au_data, -1, 0) # 60 x 42 x 2400
au_model = np.moveaxis(mat['models_AUon'], -1, 1) # 60 x 42 x 6
emo_rating = np.moveaxis(mat['data_cat'], -1, 0) # 60 x 2400 x 7
intensity = mat['data_rat'].T # 60 x 2400
# load face identities
mat = loadmat('data/raw/cluster_data_ID.mat')['id'].squeeze()
f_ids = np.stack([mat[i].T for i in range(len(mat))]) # 60 x 2400 x 8
f_ids = f_ids.round(1) # round to one decimal to reduce precision
# Last 45 participants saw one of 8 faces [face ID code: 0-7]
f_ids_45 = f_ids[15:, :, :].argmax(axis=-1)
# First 15 participants saw a weighted face [face ID code: 8-9xx]
f_ids_df = pd.DataFrame(f_ids[:15, :, :].reshape((15*2400, 8)), columns=[f'fp_{i}' for i in range(8)])
uniq_face_ids, _ = _find_repeats(f_ids_df, progress_bar=False)
uniq_face_ids = np.vstack((uniq_face_ids.reshape((15, 2400)) + 7, f_ids_45)) # 60 x 2400
# Last four columns represent male faces
gender = (f_ids.argmax(axis=2) > 3).astype(int) # 0 = female, 1 = male
gender = gender.reshape((60, 2400))
for i in tqdm(range(au_data.shape[0])):
idx = []
for ii in range(au_data.shape[2]):
au_on = np.where(au_data[i, :, ii] > 0)[0]
this_idx= '_'.join(
[f'{au_names[iii]}-{int(100 * au_data[i, iii, ii])}'
for iii in au_on]
)
if not this_idx:
this_idx = 'empty'
idx.append(this_idx)
this_dat = np.c_[au_data[i, :, :].T, uniq_face_ids[i, :], gender[i, :]]
    df = pd.DataFrame(this_dat, columns=au_names + ['face_id', 'face_gender'], index=idx)
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_datetime_index_set(self):
def test_impl(df):
df['hpat'] = pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
hpat_func(df)
df['std'] = pd.DatetimeIndex(df['str_date'])
allequal = (df['std'].equals(df['hpat']))
self.assertTrue(allequal)
def test_timestamp(self):
def test_impl():
dt = datetime(2017, 4, 26)
ts = pd.Timestamp(dt)
return ts.day + ts.hour + ts.microsecond + ts.month + ts.nanosecond + ts.second + ts.year
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_extract(self):
def test_impl(s):
return s.month
hpat_func = hpat.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
month = hpat_func(ts)
self.assertEqual(month, 4)
def test_timestamp_date(self):
def test_impl(s):
return s.date()
hpat_func = hpat.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
self.assertEqual(hpat_func(ts), test_impl(ts))
def test_datetimeindex_str_comp(self):
def test_impl(df):
return (df.A >= '2011-10-23').values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetimeindex_str_comp2(self):
def test_impl(df):
return ('2011-10-23' <= df.A).values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_df(self):
def test_impl(df):
df = pd.DataFrame({'A': pd.DatetimeIndex(df['str_date'])})
return df.A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_date(self):
def test_impl(df):
            return pd.DatetimeIndex(df['str_date'])
from datetime import datetime
import numpy as np
import pytest
from pandas import Series, _testing as tm
def test_title():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.title()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_lower_upper():
values = Series(["om", np.nan, "nom", "nom"])
result = values.str.upper()
exp = | Series(["OM", np.nan, "NOM", "NOM"]) | pandas.Series |
#!/usr/bin/env python3
#Compute UMAP dimensionality reduction over AnnData objects
import numpy as np
import pandas as pd
import scanpy as sc
import argparse
def save_umap(umap_df, out_name):
umap_df.to_csv(out_name, header = True, index = True)
def umap_counts_mat(dataset):
"""Compute UMAP over the expression matrix of batch corrected objects,
This includes: mnnCorrect, limma, ComBat, Seurat_v3 and Scanorama methods."""
sc.tl.pca(dataset, n_comps=args.n_pcs)
sc.pp.neighbors(dataset, n_neighbors=args.n_neighbours, use_rep = 'X_pca')
sc.tl.umap(dataset, n_components=2)
    umap_df = pd.DataFrame(dataset.obsm["X_umap"], index = dataset.obs_names, columns = ["UMAP_1", "UMAP_2"])
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values.
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
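# Editor's note (descriptive, not part of pandas): GroupByPlot is what makes
# ``df.groupby("A").plot(...)`` and ``df.groupby("A").plot.<kind>(...)`` work;
# each call is forwarded to the regular ``.plot`` machinery once per group via
# ``apply``.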
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to obtain a GroupBy object via obj.groupby(...), but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
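        # Editor's note: ``first_compat`` / ``last_compat`` below pick the first / last
        # non-NA value per column (or of a Series); they are wired up as ``cls.first``
        # and ``cls.last`` through ``groupby_function`` at the end of this method.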
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
cls.sum = groupby_function("sum", "add", np.sum, min_count=0)
cls.prod = groupby_function("prod", "prod", np.prod, min_count=0)
cls.min = groupby_function("min", "min", np.min, numeric_only=False)
cls.max = groupby_function("max", "max", np.max, numeric_only=False)
cls.first = groupby_function("first", "first", first_compat, numeric_only=False)
cls.last = groupby_function("last", "last", last_compat, numeric_only=False)
@Substitution(name="groupby")
@ | Appender(_common_see_also) | pandas.util._decorators.Appender |
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import pandas as pd
from pycylon.index import Index, RangeIndex, NumericIndex, CategoricalIndex, ColumnIndex, \
range_calculator
from pycylon import Table
from pycylon import CylonContext
import pyarrow as pa
import numpy as np
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
def test_with_pandas():
pdf = pd.DataFrame([[1, 2, 3, 4, 5, 'a'], [6, 7, 8, 9, 10, 'b'], [11, 12, 13, 14, 15, 'c'],
[16, 17, 18, 19, 20, 'a'], [16, 17, 18, 19, 20, 'd'],
[111, 112, 113, 114, 5,
'a']])
# print(pdf)
pdf1 = pdf.set_index([1, 2])
# print(pdf1)
print(pdf1.index)
def test_numeric_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = NumericIndex(data=rg)
assert r.index_values == rg
assert r.index_values != rg1
def test_range_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = RangeIndex(start=rg.start, stop=rg.stop, step=rg.step)
assert r.index_values == rg
assert r.index_values != rg1
r1 = RangeIndex(rg)
r2 = RangeIndex(rg)
assert r1.index_values == rg
assert r2.index_values != rg1
def calculate_range_size_manual(rg: range):
    count = 0
    for _ in rg:
        count += 1
    return count
def test_range_count():
rg_1 = range(0, 10)
rg_2 = range(0, 10, 2)
rg_3 = range(0, 10, 3)
rg_4 = range(0, 11, 2)
rg_5 = range(0, 14, 3)
rgs = [rg_1, rg_2, rg_3, rg_4, rg_5]
for rg in rgs:
assert range_calculator(rg) == calculate_range_size_manual(rg)
def test_cylon_set_index_from_column():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
print("After Indexing")
assert cn_tb.column_names == ['b']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
def test_reset_index():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
# assert cn_tb.get_index().get_type() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
rest_drop_index = False
# cn_tb.reset_index(rest_drop_index)
cn_tb.reset_index(rest_drop_index)
assert cn_tb.column_names == ['index', 'b']
# assert cn_tb.get_index().get_schema() == IndexingSchema.RANGE
assert cn_tb.get_index().get_type() == IndexingType.RANGE
def test_cylon_cpp_single_column_indexing():
# TODO: REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_float)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_single_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2], ["4", 5], ["7", 8], ["10", 11], ["20", 22], ["23", 25], ["10",
# 12]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_loc_op_mode_1():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 11], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
# assert cn_tb.get_index().get_schema() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc[7:20, 'c':'e']
loc_pd_1 = pdf_float.loc[7:20, 'c':'e']
print(loc_cn_1.get_index().values)
print(loc_pd_1.index.values)
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
# assert loc_cn_1.get_arrow_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc[7:20, 'd':]
loc_pd_2 = pdf_float.loc[7:20, 'd':]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
# assert loc_cn_2.get_arrow_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc[7:, 'd':]
loc_pd_3 = pdf_float.loc[7:, 'd':]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
# assert loc_cn_3.get_arrow_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:7, 'd':]
loc_pd_4 = pdf_float.loc[:7, 'd':]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
# assert loc_cn_4.get_arrow_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[:, 'd':]
loc_pd_5 = pdf_float.loc[:, 'd':]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
# assert loc_cn_5.get_arrow_index().get_index_array() == pa.array(loc_pd_5.index)
loc_cn_6 = cn_tb.loc[[7, 20], 'd':]
loc_pd_6 = pdf_float.loc[[7, 20], 'd':]
assert loc_pd_6.values.tolist() == loc_cn_6.to_pandas().values.tolist()
assert loc_cn_6.get_index().get_index_array() == pa.array(loc_pd_6.index)
def test_loc_op_mode_2():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': | pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int') | pandas.Series |
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.providers.postgres.operators.postgres import PostgresOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Variable
from datetime import datetime, timedelta
from acona_postgres_tools import acona_truncate_table, acona_data_write
# [END import_module]
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
'owner': 'airflow'
}
# [END default_args]
# [START instantiate_dag]
@dag(
default_args=default_args,
start_date=days_ago(2),
tags=['prophet'],
schedule_interval='0 5 * * 0')
def acona_forecast():
# [END instantiate_dag]
# [START forecast]
@task()
def forecast(metric):
"""
#### Get historic data from Warehouse to generate forecasts
"""
import json
import requests
import os
import urllib.parse
import pandas as pd
import numpy as np
from prophet import Prophet
WAREHOUSE_TOKEN = Variable.get("WAREHOUSE_TOKEN")
WAREHOUSE_URL = Variable.get("WAREHOUSE_URL")
output = {}
df = {}
result = {}
# Load urls (for specific domain only?)
urls = os.popen('curl ' + WAREHOUSE_URL + '/rpc/acona_urls -H "Authorization: Bearer ' + WAREHOUSE_TOKEN + '"').read()
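        # Editor's note (assumption): the warehouse RPC presumably returns a JSON
        # array here, so ``urls`` is a raw JSON string and would typically be parsed
        # with ``json.loads(urls)`` before iterating over the individual URLs.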
forecasts = {}
forecasted_lower = pd.DataFrame()
forecasted_upper = | pd.DataFrame() | pandas.DataFrame |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
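# entries mapped to None are silently dropped by concat, so key "a" is absent from the result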
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, lists, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check that the result has a
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
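# ndim is 1 for a Series and 2 for a DataFrame, so ndmin below builds arrays of matching dimensionality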
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = | Series([1]) | pandas.Series |
################################
# LSTM DEVELOP AND DIAGNOSTIC #
################################
# This code takes raw data and corrected data, applies an LSTM model, and identifies anomalies.
import rules_detect
import anomaly_utilities
import modeling_utilities
import numpy as np
import tensorflow as tf
import pandas as pd
import seaborn as sns
from matplotlib.pylab import rcParams
import matplotlib.pyplot as plt
import plotly.io as pio
pio.renderers.default = "browser"
pd.options.mode.chained_assignment = None
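# setting this option to None silences pandas' SettingWithCopyWarning for chained assignment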
sns.set(style='whitegrid', palette='muted')
rcParams['figure.figsize'] = 14, 8
np.random.seed(1)
print('Tensorflow version:', tf.__version__)
print("LSTM exploration script begin.")
##################################################
# LSTM Multivariate Retrieve and Preprocess Data #
##################################################
# DEFINE SITE and VARIABLE #
#########################################
# site = "BlackSmithFork"
site = "FranklinBasin"
# site = "MainStreet"
# site = "Mendon"
# site = "TonyGrove"
# site = "WaterLab"
sensor = ['temp', 'cond', 'ph', 'do']
year = [2014, 2015, 2016, 2017, 2018, 2019]
# GET DATA #
#########################################
df_full, sensor_array = anomaly_utilities.get_data(site, sensor, year, path="/LRO_data/")
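# df_full presumably holds every variable for the chosen site/years, while sensor_array maps each
# sensor name to its own DataFrame with an 'observed' column (both are used that way below)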
# RULES BASED DETECTION #
#########################################
maximum = [13, 380, 9.2, 13]
minimum = [-2, 120, 7.5, 8]
length = 6
size = []
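# maximum/minimum are per-sensor bounds in the same order as `sensor` (temp, cond, ph, do);
# `length` is the persistence window and `size` collects the group sizes reported for each sensor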
for i in range(0, len(sensor_array)):
sensor_array[sensor[i]] = rules_detect.range_check(sensor_array[sensor[i]], maximum[i], minimum[i])
sensor_array[sensor[i]] = rules_detect.persistence(sensor_array[sensor[i]], length)
s = rules_detect.group_size(sensor_array[sensor[i]])
size.append(s)
sensor_array[sensor[i]] = rules_detect.interpolate(sensor_array[sensor[i]])
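# Rules-based screening (behavior inferred from the names in the local rules_detect module):
# range_check flags values outside [minimum, maximum] -- e.g. a temperature above 13 or below -2
# would be flagged -- persistence flags runs of repeated values longer than `length`, group_size
# presumably reports the widest flagged group, and interpolate fills flagged points before the
# data reach the LSTM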
# Create new data frame with raw and corrected data for variables of interest
df_observed = pd.DataFrame(index=df_full.index)
df_observed['temp_obs'] = sensor_array['temp']['observed']
df_observed['cond_obs'] = sensor_array['cond']['observed']
df_observed['ph_obs'] = sensor_array['ph']['observed']
df_observed['do_obs'] = sensor_array['do']['observed']
df_raw = | pd.DataFrame(index=df_full.index) | pandas.DataFrame |
# TODO: Dragon Real Estate - Price Predictor
# TODO: Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# TODO: Import the data file
housing = | pd.read_csv("new data.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter, so make sure the last element of the iterator was 'l' in
# this case, since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the values defined above should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
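# with regex=False the pattern is matched literally, so only the value containing "mmm[_]+" verbatim matches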
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
        # thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
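        # taking the first match per subject (level "match" == 0) from
        # extractall should reproduce what a plain extract returns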
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
        # n=0 and n=-1 both mean "no limit" (re.split uses 0, str.split uses -1)
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split (separator not found)
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
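        # note: no assertion here; a negative step with start < stop gives an
        # empty slice ('aafootwo'[2:5:-1] == ''), so xp above is not compared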
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
In this file we run our models one by one.
"""
# Imports
import random
from random import shuffle
import numpy as np
import os
import scipy.sparse as sp
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
from models import MLP_With_Average_Voting, PretrainedDensenet, PretrainedResnet, CNN_With_Average_Voting, \
MLP_With_Max_Pooling, CNN_MLP_Average_Voting, CNN_MLP_Max_Pooling, PretrainedDensenetAverageVoting, \
PretrainedDensenetRELU, PretrainedDensenetAverageVotingRELU, CNN_With_Average_VotingRELU, \
CNN_MLP_Average_VotingRELU, CNN_MLP_Max_PoolingRELU, CNN_With_Max_Pooling, CNN_With_Max_PoolingRELU
from sklearn.metrics import roc_curve, auc, roc_auc_score, average_precision_score
import re
import argparse
import logging
import pandas as pd
import json
from dataloader import get_study_level_data, get_dataloaders
# Seed for our experiments
seed = 1997
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
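# Note: for fully deterministic GPU runs one could additionally set
# torch.backends.cudnn.deterministic = True and torch.backends.cudnn.benchmark = False
# (not enabled here)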
# Setting cuda for GPU if it is available
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.cuda.manual_seed(seed)
# Base directory for checkpoints
odir_checkpoint = '/mnt/data/sotiris/checkpoints/'
# odir_checkpoint = 'drive/My Drive/MURA Project/checkpoints/'
# Initialize the logger handle to None
hdlr = None
# Initialize names of the body parts for the MURA dataset
study_wrist = 'XR_WRIST'
study_elbow = 'XR_ELBOW'
study_finger = 'XR_FINGER'
study_forearm = 'XR_FOREARM'
study_hand = 'XR_HAND'
study_humerus = 'XR_HUMERUS'
study_shoulder = 'XR_SHOULDER'
# Set checkpoints for each model
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'resnet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'resnet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_relu_progress.pth.tar'
# THIS IS FOR MLP + AVERAGE POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_averagevoting.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_progress.pth.tar'
# best_checkpoint_name = 'mlp_averagevoting_nodropout.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_nodropout_progress.pth.tar'
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_maxpooling.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_progress.pth.tar'
# best_checkpoint_name = 'mlp_maxpooling_nodropout.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_nodropout_progress.pth.tar'
# FOR TESTING
# best_checkpoint_name = 'testing.pth.tar'
# progress_checkpoint = 'testing_progress.pth.tar'
# FOR BEST MODEL
best_checkpoint_name = 'densenet_maxpooling_relu/hyperopt_trial_0.pth.tar'
progress_checkpoint = None
# Create the checkpoints directory
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
def print_params(model):
'''
    Prints the model architecture and the number of trainable and untrainable parameters.
:param model: The pytorch model
:return: Nothing.
'''
print(40 * '=')
print(model)
print(40 * '=')
logger.info(40 * '=')
logger.info(model)
logger.info(40 * '=')
trainable = 0
untrainable = 0
for parameter in model.parameters():
# print(parameter.size())
v = 1
for s in parameter.size():
v *= s
if parameter.requires_grad:
trainable += v
else:
untrainable += v
total_params = trainable + untrainable
print(40 * '=')
print('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
print(40 * '=')
logger.info(40 * '=')
logger.info('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
logger.info(40 * '=')
logger.info('')
logger.info('')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
"""
Save the torch checkpoint
:param state: The state/checkpoint to save
:param filename: The path and filename
:return: Nothing
"""
torch.save(state, filename)
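# Illustrative note (an assumption, not taken from this repository): the `state`
# dict passed to save_checkpoint() is expected to follow the usual PyTorch
# convention, e.g.
# save_checkpoint(
#     {'epoch': epoch, 'state_dict': model.state_dict(),
#      'optimizer': optimizer.state_dict(), 'best_auc': best_auc},
#     filename=os.path.join(odir_checkpoint, best_checkpoint_name))
# Only `state` and `filename` are required by the function itself; the key
# names above are illustrative.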
def init_the_logger(hdlr):
"""
Initializes the logger
:param hdlr: The handler for the logger
:return: The logger and its handler
"""
# Create the checkpoints folder
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
# Set the logger base directory
od = odir_checkpoint.split('/')[-1]
logger = logging.getLogger(od)
# Remove the previous handler
if (hdlr is not None):
logger.removeHandler(hdlr)
# Create the handler for the logger for each experiment
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'resnet_mlp_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling_relu.log'))
# THIS IS FOR MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting_nodropout.log'))
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling_nodropout.log'))
# FOR TESTING
hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'testing.log'))
# Set the format for the logger
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
return logger, hdlr
# Initialize the logger
logger, hdlr = init_the_logger(hdlr)
def back_prop(batch_costs):
"""
Perform back propagation for a batch
:param batch_costs: The costs for the batch
:return: The average cost of the batch
"""
batch_cost = sum(batch_costs) / float(len(batch_costs))
batch_cost.backward()
optimizer.step()
optimizer.zero_grad()
batch_aver_cost = batch_cost.cpu().item()
return batch_aver_cost
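# Illustrative usage sketch (not part of the original training loop): how
# back_prop() is typically driven. `cost_fn` and `studies` are placeholder
# names; only back_prop() itself comes from this file.
def _example_backprop_loop(cost_fn, studies, batch_size=32):
    batch_costs, epoch_costs = [], []
    for study in studies:
        batch_costs.append(cost_fn(study))  # one graph-attached cost per study
        if len(batch_costs) == batch_size:
            epoch_costs.append(back_prop(batch_costs))  # average, backward, optimizer step
            batch_costs = []
    if batch_costs:  # flush the remainder of the last, smaller batch
        epoch_costs.append(back_prop(batch_costs))
    return sum(epoch_costs) / max(len(epoch_costs), 1)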
# HERE YOU PASS POSITIVE AND NEGATIVE WEIGHTS
# IT IS THE LOSS FROM THE PAPER
# def weighted_binary_cross_entropy(output, target, weights=None):
# if weights is not None:
# assert len(weights) == 2
# loss = weights[1] * (target * torch.log(output)) + weights[0] * ((1 - target) * torch.log(1 - output))
# else:
# loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
# return torch.neg(torch.mean(loss))
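# Hedged sketch (illustrative only, kept commented out like the variant above):
# the same weighted BCE with the usual numerical guard of clamping the
# probabilities before taking logs. The weights[0]/weights[1] convention for
# negative/positive classes is assumed to match the version above.
# def weighted_bce_stable(output, target, weights=None, eps=1e-7):
#     output = torch.clamp(output, eps, 1.0 - eps)
#     if weights is not None:
#         loss = weights[1] * (target * torch.log(output)) + weights[0] * ((1 - target) * torch.log(1 - output))
#     else:
#         loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
#     return torch.neg(torch.mean(loss))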
print()
print('Loading Data...')
print()
print('Loading ELBOW')
study_data_elbow = get_study_level_data(study_elbow)
print('Loading FINGER')
study_data_finger = get_study_level_data(study_finger)
print('Loading FOREARM')
study_data_forearm = get_study_level_data(study_forearm)
print('Loading HAND')
study_data_hand = get_study_level_data(study_hand)
print('Loading WRIST')
study_data_wrist = get_study_level_data(study_wrist)
print('Loading SHOULDER')
study_data_shoulder = get_study_level_data(study_shoulder)
print('Loading HUMERUS')
study_data_humerus = get_study_level_data(study_humerus)
print()
print('Data Loaded!')
print()
frames_train = [study_data_elbow['train'],
study_data_finger['train'],
study_data_forearm['train'],
study_data_hand['train'],
study_data_wrist['train'],
study_data_shoulder['train'],
study_data_humerus['train']]
frames_dev = [study_data_elbow['valid'],
study_data_finger['valid'],
study_data_forearm['valid'],
study_data_hand['valid'],
study_data_wrist['valid'],
study_data_shoulder['valid'],
study_data_humerus['valid']]
for_test_dev = | pd.concat(frames_dev) | pandas.concat |
# -*- coding: utf-8 -*-
# @Time : 2019/4/11 10:07 PM
# @Author : Edwin
# @File : test.py
# @Software: PyCharm
from time_series_detector import detect
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from ora_dual import models
import os,time,datetime
import pandas
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', 1000)
| pd.set_option('display.width', 1000) | pandas.set_option |
import numpy as np
import pandas as pd
import glob
import json
import os
import sys
sys.path.append('../')
from sklearn.metrics import roc_auc_score, roc_curve
from src.utils.results_processing import metric_barplot, add_stat_significance
from src.datasets.MURADataset import MURA_TrainValidTestSplitter
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.use('Agg')
FIGURE_PATH = '../../Figures/'
OUTPUT_PATH = '../../Outputs/'
expe_folders = ['AE_DSAD_2020_06_05_01h15',
'AE_DMSAD_19_06',
'SimCLR_DSAD_2020_06_01_10h52',
'SimCLR_DMSAD_2020_06_16_17h06']
pretrain = ['AE','AE','SimCLR','SimCLR']
model = ['DSAD','DMSAD','DSAD','DMSAD']
data_info_path = '../../data/data_info.csv'
def plot_tSNE_bodypart(tsne2D, body_part, ax, title='', legend=False):
"""
plot a 2D t-SNE by body part.
"""
cmap = matplotlib.cm.get_cmap('Set2')
color_list = cmap(np.linspace(0.1,0.9,7))
for bp, color in zip(np.unique(body_part), color_list):
ax.scatter(tsne2D[body_part == bp, 0],
tsne2D[body_part == bp, 1],
s=2, color=color, marker='.', alpha=0.8)
ax.set_axis_off()
ax.set_title(title, fontsize=12, fontweight='bold')
if legend:
handles = [matplotlib.patches.Patch(facecolor=color) for color in color_list]
leg_name = [bp.title() for bp in np.unique(body_part)]
ax.legend(handles, leg_name, ncol=4, loc='upper center', frameon=False,
fontsize=12, bbox_to_anchor=(1, 0), bbox_transform=ax.transAxes)
def plot_tSNE_label(tsne2D, labels, ax, title='', legend=False):
"""
plot a 2D t-SNE by labels.
"""
color_dict = {1: 'coral', 0: 'limegreen'}
for lab, color in color_dict.items():
ax.scatter(tsne2D[labels == lab, 0],
tsne2D[labels == lab, 1],
s=2, color=color, marker='.', alpha=0.5)
ax.set_axis_off()
ax.set_title(title, fontsize=12, fontweight='bold')
if legend:
handles = [matplotlib.patches.Patch(facecolor=color, alpha=0.5) for color in color_dict.values()]
leg_names = ['Normal' if lab == 0 else 'Abnormal' for lab in color_dict.keys()]
ax.legend(handles, leg_names, ncol=2, loc='upper center', frameon=False,
fontsize=12, bbox_to_anchor=(1, 0), bbox_transform=ax.transAxes)
def plot_score_dist(scores, labels, ax, title='', legend=False, min_val=None, max_val=None):
"""
Plot the score distribution by labels.
"""
if not min_val:
min_val = scores.min()
if not max_val:
max_val = scores.max()
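    # plot the abnormal (label == 1) score distribution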
ax.hist(scores[labels == 1],
bins=40, density=False, log=True,
range=(min_val, max_val),
color='coral', alpha=0.5)
# plot normal distribution
ax.hist(scores[labels == 0],
bins=40, density=False, log=True,
range=(min_val, max_val),
color='limegreen', alpha=0.5)
ax.set_title(title, fontsize=12, fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if legend:
handles = [matplotlib.patches.Patch(facecolor=color, alpha=0.5) for color in ['limegreen', 'coral']]
leg_names = ['Normal', 'Abnormal']
ax.legend(handles, leg_names, ncol=2, loc='upper center', frameon=False,
fontsize=12, bbox_to_anchor=(0, -0.2), bbox_transform=ax.transAxes)
ax.set_xlabel('anomaly score [-]', fontsize=12)
# def get_AUC_list(scores, labels, body_part):
# """
#
# """
# auc_list = [roc_auc_score(labels, scores)]
# name_list = ['All']
# for bp in np.unique(body_part):
# auc_list.append(roc_auc_score(labels[body_part == bp], scores[body_part == bp]))
# name_list.append(bp.title())
#
# return np.array(auc_list).reshape(1,-1), name_list
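# Minimal working sketch of the helper commented out above (kept separate so
# the commented original stays untouched). Assumes `scores`, `labels` and
# `body_part` are aligned 1-D numpy arrays.
def get_auc_list_sketch(scores, labels, body_part):
    auc_list = [roc_auc_score(labels, scores)]
    name_list = ['All']
    for bp in np.unique(body_part):
        mask = body_part == bp
        auc_list.append(roc_auc_score(labels[mask], scores[mask]))
        name_list.append(bp.title())
    return np.array(auc_list).reshape(1, -1), name_list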
#%%#############################################################################
# get data #
################################################################################
df_info = pd.read_csv(data_info_path)
df_info = df_info.drop(df_info.columns[0], axis=1)
df_info = df_info[df_info.low_contrast == 0]
# Get valid and test set
spliter = MURA_TrainValidTestSplitter(df_info, train_frac=0.5,
ratio_known_normal=0.05,
ratio_known_abnormal=0.05, random_state=42)
spliter.split_data(verbose=False)
valid_df = spliter.get_subset('valid')
test_df = spliter.get_subset('test')
# %% Get representstion of first replicate
# load t-SNE representations of valid set
rep = 1
set = 'valid'
df_set = valid_df if set == 'valid' else test_df
df_sim = {'AE':[], 'SimCLR':[]}
df_ad = {'AE':{'DSAD':[], 'DMSAD':[]}, 'SimCLR':{'DSAD':[], 'DMSAD':[]}}
for expe, pre, mod in zip(expe_folders, pretrain, model):
with open(OUTPUT_PATH + expe + f'/results/results_{rep}.json', 'r') as f:
results = json.load(f)
# representation of SimCLR or AE
df_tmp = df_set.copy() \
.drop(columns=['patient_any_abnormal', 'body_part_abnormal', 'low_contrast', 'semi_label'])
cols = ['idx', '512_embed', '128_embed'] if pre == 'SimCLR' else ['idx', 'label', 'AE_score', '512_embed', '128_embed']
df_scores = pd.DataFrame(data=results[pre][set]['embedding'], columns=cols) \
.set_index('idx')
df_sim[pre].append(pd.merge(df_tmp, df_scores, how='inner', left_index=True, right_index=True))
# scores and embedding of D(M)SAD
df_tmp = df_set.copy() \
.drop(columns=['patient_any_abnormal', 'body_part_abnormal', 'low_contrast', 'semi_label'])
cols = ['idx', 'label', 'ad_score', 'sphere_idx','128_embed'] if mod == 'DMSAD' else ['idx', 'label', 'ad_score', '128_embed']
# if pre == 'AE' and mod == 'DMSAD':
# cols = ['idx', 'label', 'ad_score', '128_embed']
df_scores = | pd.DataFrame(data=results['AD'][set]['scores'], columns=cols) | pandas.DataFrame |
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
import xlsxwriter
import pandas as pd # Excel
import struct # Binary writing
import h5py
import time
import scipy.signal
import scipy.ndimage
import scipy.io as sio # Read .mat files
from scipy.ndimage.filters import convolve,correlate,median_filter
import sklearn.metrics as skmet
import sklearn.decomposition as skdec
import sklearn.linear_model as sklin
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import normalize
from sklearn import svm
from sklearn import neighbors
#Regression
def regress(features,score):
pred = []
#Leave one out split
loo = LeaveOneOut()
for trainidx, testidx in loo.split(features):
#Indices
X_train, X_test = features[trainidx], features[testidx]
X_test -= X_train.mean(0)
X_train -= X_train.mean(0)
Y_train, Y_test = score[trainidx], score[testidx]
#Linear regression
regr = sklin.Ridge(alpha=1)
regr.fit(X_train,Y_train)
#Predicted score
pred.append(regr.predict(X_test))
return np.array(pred)
#Logistic regression
def logreg(features,score):
pred = []
#Leave one out split
loo = LeaveOneOut()
for trainidx, testidx in loo.split(features):
#Indices
X_train, X_test = features[trainidx], features[testidx]
X_test -= X_train.mean(0)
X_train -= X_train.mean(0)
Y_train, Y_test = score[trainidx], score[testidx]
#Linear regression
regr = sklin.LogisticRegression(solver='newton-cg',max_iter=1000)
regr.fit(X_train,Y_train)
#Predicted score
P = regr.predict_proba(X_test)
pred.append(P)
pred = np.array(pred)
pred = pred[:,:,1]
return pred.flatten()
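#Illustrative usage sketch (not part of the original pipeline): leave-one-out
#probabilities from logreg() scored with ROC AUC. `features` is an (N, d)
#array and `binary_score` holds 0/1 labels.
def _example_logreg_auc(features, binary_score):
    probs = logreg(features, binary_score)
    return skmet.roc_auc_score(binary_score, probs)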
#Scikit PCA
def ScikitPCA(features,ncomp):
pca = skdec.PCA(n_components=ncomp, svd_solver='full')
score = pca.fit(features).transform(features)
return pca, score
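#Illustrative usage sketch (assumed call pattern, not from the original study):
#project the features onto the first ncomp principal components and report the
#explained variance ratio of the fitted model.
def _example_scikit_pca(features, ncomp=3):
    pca_model, pca_score = ScikitPCA(features, ncomp)
    return pca_score, pca_model.explained_variance_ratio_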
#Principal component analysis
def PCA(features,ncomp):
#Feature dimension, x=num variables,N=num observations
x,N = np.shape(features)
#Mean feature
mean_f = np.mean(features,axis=1)
#Centering
centrd = np.zeros((x,N))
for k in range(N):
centrd[:,k] = features[:,k]-mean_f
#PCs from covariance matrix if N>=x, svd otherwise
if False:
#Covariance matrix
Cov = np.zeros((x,x))
f = np.zeros((x,1))
for k in range(N):
f[:,0] = centrd[:,k]
Cov = Cov+1/N*np.matmul(f,f.T)
#Eigen values
E,V = np.linalg.eig(Cov)
#Sort eigenvalues and vectors to descending order
idx = np.argsort(E)[::-1]
V = np.matrix(V[:,idx])
E = E[idx]
for k in range(ncomp):
s = np.matmul(V[:,k].T,centrd).T
try:
score = np.concatenate((score,s),axis=1)
except NameError:
score = s
p = V[:,k]
try:
pcomp = np.concatenate((pcomp,p),axis=1)
except NameError:
pcomp = p
else:
#PCA with SVD
u,s,v = np.linalg.svd(centrd,compute_uv=1)
pcomp = v[:,:ncomp]
# Save results
writer = pd.ExcelWriter(r'C:\Users\sarytky\Desktop\trials' + r'\PCA_test.xlsx')
df1 = pd.DataFrame(centrd)
df1.to_excel(writer, sheet_name='dataAdjust')
df2 = pd.DataFrame(u)
df2.to_excel(writer, sheet_name='u')
df3 = pd.DataFrame(s)
df3.to_excel(writer, sheet_name='s')
df4 = pd.DataFrame(v)
df4.to_excel(writer, sheet_name='v')
writer.save()
np.savetxt(r'C:\Users\sarytky\Desktop\trials' + '\\''dataAdjust_python.csv', centrd, delimiter=',')
score = np.matmul(u,s).T[:,1:ncomp]
return pcomp,score
#Local grayscale standardization
def localstandard(im,w1,w2,sigma1,sigma2):
#Centers grayscales with Gaussian weighted mean
#Gaussian kernels
kernel1 = Gauss2D(w1,sigma1)
kernel2 = Gauss2D(w2,sigma2)
#Blurring
blurred1 = scipy.ndimage.convolve(im,kernel1)
blurred2 = scipy.ndimage.convolve(im,kernel2)
#print(blurred1[11,:])
#Centering grayscale values
centered = im-blurred1
#Standardization
std = (scipy.ndimage.convolve(centered**2,kernel2))**0.5
new_im = centered/(std+1e-09)
return new_im
#Gaussian kernel
def Gauss2D(w,sigma):
#Generates 2d gaussian kernel
kernel = np.zeros((w,w))
#Constant for centering
r = (w-1)/2
for ii in range(w):
for jj in range(w):
x = -((ii-r)**2+(jj-r)**2)/(2*sigma**2)
kernel[ii,jj] = np.exp(x)
#Normalizing the kernel
kernel = 1/np.sum(kernel)*kernel
return kernel
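#Illustrative usage sketch (the kernel sizes and sigmas below are placeholder
#values, not the parameters used in the original experiments): local grayscale
#standardization of a single image with Gauss2D-based weighting.
def _example_local_standardization(gray_image):
    return localstandard(gray_image.astype(np.float64), 23, 9, 5, 2)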
def bnw(x,y):
#Rounding
x1 = np.floor(x)
x2 = np.ceil(x)
y1 = np.floor(y)
y2 = np.ceil(y)
#Compute weights
if x2-x1 != 0:
w11 = (x2-x)/(x2-x1)
w12 = (x-x1)/(x2-x1)
w21 = (x2-x)/(x2-x1)
w22 = (x-x1)/(x2-x1)
else:
w11 = 1
w12 = 1
w21 = 1
w22 = 1
if y2-y1 != 0:
w11 *= (y2-y)/(y2-y1)
w12 *= (y2-y)/(y2-y1)
w21 *= (y-y1)/(y2-y1)
w22 *= (y-y1)/(y2-y1)
else:
w11 *= 1
w12 *= 1
w21 *= 1
w22 *= 1
return w11,w12,w21,w22
def LBP(I,N,R):
ks = 2*(R+1)+1
c = R+1
kernel = np.zeros((ks,ks))
filterbank = []
theta = np.linspace(0,N-1,N)
theta *= 2*np.pi/N
for k in range(N):
#Set center to -1
_krnl = kernel.copy()
#Compute neighbour coordinates
x = R*np.cos(theta[k])
y = R*np.sin(theta[k])
x1 = int(np.floor(x))
x2 = int(np.ceil(x))
y1 = int(np.floor(y))
y2 = int(np.ceil(y))
#Compute interpolation weights
w11,w12,w21,w22 = bnw(x,y)
#Insert weights to kernel
_krnl[c+y1,c+x1] = w11
_krnl[c+y1,c+x2] = w12
_krnl[c+y2,c+x1] = w21
_krnl[c+y2,c+x2] = w22
#Append kernel to list
filterbank.append(_krnl)
#Compute lbp
lbp = []
for k in range(len(filterbank)):
        _lbp = correlate(I,filterbank[k])-I
        _lbp = (_lbp>=1e-6)*1.0
lbp.append(_lbp)
#LBP to numpy array, channels to 3rd axis
lbp = np.array(lbp)
lbp = np.swapaxes(lbp,0,2)
lbpI = np.zeros(lbp[:,:,0].shape)
for k in range(lbp.shape[2]):
lbpI += lbp[:,:,k]*2**(lbp[:,:,k]*k)
return lbp,lbpI
def MRELBP(I,N,R,r,wc,wR,wr, mode='hist'):
print(np.shape(I))
#Mean grayscale value and std
muI = I.mean()
stdI = I.std()
#Centering and scaling with std
I = (I-muI)/stdI
Ic = median_filter(I,wc)
IR = median_filter(I,wR)
Ir = median_filter(I,wr)
print(np.shape(I))
    #kernel weights
f1 = []
f2 = []
ks = 2*(R+1)+1
c = R+1
kernel = np.zeros((ks,ks))
theta = np.linspace(0,N-1,N)
theta *= 2*np.pi/N
#Kernels
for k in range(N):
#Large radius
_krnl = kernel.copy()
#Compute neighbour coordinates
x = R*np.cos(theta[k])
y = R*np.sin(theta[k])
x1 = int(np.floor(x))
x2 = int(np.ceil(x))
y1 = int(np.floor(y))
y2 = int(np.ceil(y))
#Compute interpolation weights
w11,w12,w21,w22 = bnw(x,y)
#Insert weights to kernel
_krnl[c+y1,c+x1] = w11
_krnl[c+y1,c+x2] = w12
_krnl[c+y2,c+x1] = w21
_krnl[c+y2,c+x2] = w22
#Append kernel to list
f1.append(_krnl)
#Small radius
_krnl = kernel.copy()
#Compute neighbour coordinates
x = r*np.cos(theta[k])
y = r*np.sin(theta[k])
x1 = int(np.floor(x))
x2 = int(np.ceil(x))
y1 = int(np.floor(y))
y2 = int(np.ceil(y))
#Compute interpolation weights
w11,w12,w21,w22 = bnw(x,y)
#Insert weights to kernel
_krnl[c+y1,c+x1] = w11
_krnl[c+y1,c+x2] = w12
_krnl[c+y2,c+x1] = w21
_krnl[c+y2,c+x2] = w22
#Append kernel to list
f2.append(_krnl)
#Compute lbps
lbpR = []
lbpr = []
lbpD = []
for k in range(len(f1)):
_lbpR = correlate(I,f1[k])-Ic
_lbpR = (_lbpR>=1e-6)*1.0
lbpR.append(_lbpR)
_lbpr = correlate(I,f2[k])-Ic
_lbpr = (_lbpr>=1e-6)*1.0
lbpr.append(_lbpr)
_lbpD = _lbpR-_lbpr
_lbpD = (_lbpD>=1e-6)*1.0
lbpD.append(_lbpD)
#LBP to numpy array, channels to 3rd axis
lbpR = np.array(lbpR)
lbpR = np.swapaxes(lbpR,0,2)
    lbpr = np.array(lbpr)
    lbpr = np.swapaxes(lbpr,0,2)
lbpD = np.array(lbpD)
lbpD = np.swapaxes(lbpD,0,2)
lbpIR = np.zeros(lbpR[:,:,0].shape)
lbpIr = np.zeros(lbpr[:,:,0].shape)
lbpID = np.zeros(lbpD[:,:,0].shape)
print(np.shape(lbpIR))
for k in range(lbpR.shape[2]):
lbpIR += lbpR[:,:,k]*2**k
lbpIr += lbpr[:,:,k]*2**k
lbpID += lbpD[:,:,k]*2**k
#histograms
#Center pixels
d = round(R+(wR-1)/2)
lbpIR = lbpIR[d:-d,d:-d]
d1 = round((wr-1)/2)
lbpIr = lbpIr[d1:-d1,d1:-d1]
d2 = round((wR-1)/2)
lbpID = lbpID[d2:-d2,d2:-d2]
histR = np.zeros((2**N,1))
histr = np.zeros((2**N,1))
histD = np.zeros((2**N,1))
for k in range(2**N):
_tmp = (lbpIR==k)*1.0
histR[k] += _tmp.sum()
_tmp = (lbpIr==k)*1.0
histr[k] += _tmp.sum()
_tmp = (lbpID==k)*1.0
histD[k] += _tmp.sum()
lbpc = (Ic-Ic.mean())>=1e-6
d = round(R+(wc-1)/2)
lbpc = lbpc[d:-d,d:-d]
histc = np.zeros((2,1))
histc[0,0] = np.sum((lbpc==0)*1.0)
histc[1,0] = np.sum((lbpc==1)*1.0)
if mode == 'hist':
return histc,histR,histr,histD
else:
return lbpc,lbpIR,lbpIr,lbpID
#Mapping
def getmapping(N):
#Defines rotation invariant uniform mapping for lbp of N neighbours
newMax = N + 2
table = np.zeros((1,2**N))
for k in range(2**N):
#Binary representation of bin number
binrep = np.binary_repr(k,N)
#Convert string to list of digits
i_bin = np.zeros((1,len(binrep)))
for ii in range(len(binrep)):
i_bin[0,ii] = int(float(binrep[ii]))
#Rotation
j_bin = np.roll(i_bin,-1)
#uniformity
numt = np.sum(i_bin!=j_bin)
#Binning
if numt <= 2:
b = np.binary_repr(k,N)
c=0
for ii in range(len(b)):
c = c+int(float(b[ii]))
table[0,k] = c
else:
table[0,k] = N+1
#num = newMax
return table
#Apply mapping to lbp
def maplbp(bin,mapping):
#Applies mapping to lbp bin
#Number of bins in output
N = int(np.max(mapping))
#Empty array
outbin = np.zeros((1,N+1))
for k in range(N+1):
#RIU indices
M = mapping==k
#Extract indices from original bin to new bin
outbin[0,k] = np.sum(M*bin)
return outbin
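#Illustrative end-to-end sketch (assumed pipeline, not from the original
#script): plain LBP image -> histogram -> rotation invariant uniform mapping,
#for N=8 neighbours at radius R=1.
def _example_lbp_histogram(gray_image, N=8, R=1):
    _, lbp_image = LBP(gray_image.astype(np.float64), N, R)
    hist = np.zeros((1, 2 ** N))
    for k in range(2 ** N):
        hist[0, k] = np.sum(lbp_image == k)
    mapping = getmapping(N)
    return maplbp(hist, mapping)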
def loadbinary(path):
bytesarray = np.fromfile(path, dtype=np.int32) # read everything as int32
w = bytesarray[0]
l = int((bytesarray.size - 1) / w)
with open(path, "rb") as f: # open to read binary file
f.seek(4) # skip first integer (width)
features = np.zeros((w,l))
for i in range(w):
for j in range(l):
features[i, j] = struct.unpack('<i', f.read(4))[0] # when reading byte by byte (struct),
#data type can be defined with every byte
return features
def writebinaryweights(path, ncomp, eigenvectors, singularvalues, weights):
with open(path, "wb") as f:
f.write(struct.pack('<i', eigenvectors.shape[1])) # Width
f.write(struct.pack('<i', ncomp)) # Number of components
# Eigenvectors
for i in range(eigenvectors.shape[0]):
for j in range(eigenvectors.shape[1]):
f.write(struct.pack('<f', eigenvectors[i, j]))
# Singular values
for i in range(singularvalues.shape[0]):
f.write(struct.pack('<f', singularvalues[i]))
# Weights
for i in range(weights.shape[0]):
f.write(struct.pack('<f', weights[i]))
return True
def writebinaryimage(path, image, dtype):
with open(path, "wb") as f:
f.write(struct.pack('<i', image.shape[0])) # Width
# Image values as float
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if dtype == 'float':
f.write(struct.pack('<f', image[i, j]))
if dtype == 'double':
f.write(struct.pack('<d', image[i, j]))
if dtype == 'int':
f.write(struct.pack('<i', image[i, j]))
return True
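# Note on the binary layout used above: loadbinary expects a little-endian
# int32 width followed by int32 values; writebinaryweights writes two int32
# headers (width and number of components) followed by float32 eigenvectors,
# singular values and weights; writebinaryimage writes one int32 header and
# then the image values in the requested dtype. This comment is descriptive
# only; the consumer of these files lives outside this script.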
### Program ###
#Start time
start_time = time.time()
#Samples
impath = r'V:\Tuomas\PTASurfaceImages'
path = r'C:\Users\sarytky\Desktop\trials'
filelist = os.listdir(impath)
filelist.sort()
# Load grades to array
grades = | pd.read_excel(r'C:\Users\sarytky\Desktop\trials' + r'\PTAgreiditjanaytteet.xls', 'Sheet1') | pandas.read_excel |
from os import path
from shutil import copyfile
import numpy as np
import pandas as pd
import MDAnalysis
import matplotlib.pyplot as plt
import networkx as nx
from enmspring import pairtype
from enmspring.spring import Spring
from enmspring.k_b0_util import get_df_by_filter_st, get_df_by_filter_PP, get_df_by_filter_R, get_df_by_filter_RB, get_df_by_filter_PB, get_df_by_filter_PP2_angles, get_df_same_resid, get_df_not_same_resid, FilterSB0Agent
from enmspring.hb_util import HBAgent
from enmspring.na_seq import sequences
from enmspring.networkx_display import THY_Base, CYT_Base, ADE_Base, GUA_Base, THY_Right_Base, CYT_Right_Base, ADE_Right_Base, GUA_Right_Base
hosts = ['a_tract_21mer', 'gcgc_21mer', 'tgtg_21mer',
'atat_21mer', 'ctct_21mer', 'g_tract_21mer']
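# GraphAgent builds a coarse-grained spring network for one 21-bp duplex: the
# nodes are CG beads read from the averaged CRD structure, the edges carry the
# spring constants k (and rest lengths b0) filtered from df_all_k, and the
# eigenvectors of the resulting Laplacian-like matrix are assigned to STRAND1
# or STRAND2.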
class GraphAgent:
type_na = 'bdna+bdna'
n_bp = 21
cutoff = 4.7
def __init__(self, host, rootfolder, time_label='0_5000'):
self.host = host
self.rootfolder = rootfolder
self.time_label = time_label
self.host_folder = path.join(rootfolder, host)
self.na_folder = path.join(self.host_folder, self.type_na)
self.input_folder = path.join(self.na_folder, 'input')
self.spring_obj = Spring(self.rootfolder, self.host, self.type_na, self.n_bp, time_label)
self.df_all_k = self.spring_obj.read_k_b0_pairtype_df_given_cutoff(self.cutoff)
self.crd = path.join(self.input_folder, '{0}.nohydrogen.avg.crd'.format(self.type_na))
self.npt4_crd = path.join(self.input_folder, '{0}.nohydrogen.crd'.format(self.type_na))
self.u = MDAnalysis.Universe(self.crd, self.crd)
self.map, self.inverse_map, self.residues_map, self.atomid_map,\
self.atomid_map_inverse, self.atomname_map, self.strandid_map,\
self.resid_map, self.mass_map = self.build_map()
self.node_list = None
self.d_idx = None
self.n_node = None
self.adjacency_mat = None
self.degree_mat = None
self.laplacian_mat = None
self.b0_mat = None
self.w = None # Eigenvalue array
self.v = None # Eigenvector matrix, the i-th column is the i-th eigenvector
self.strand1_array = list() # 0: STRAND1, 1: STRAND2
self.strand2_array = list() #
self.strand1_benchmark = None
self.strand2_benchmark = None
self.d_seq = {'STRAND1': sequences[host]['guide'], 'STRAND2': sequences[host]['target']}
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if atom_type == 'B':
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"Thare are {self.n_node} nodes.")
def initialize_three_mat(self):
self.adjacency_mat = np.zeros((self.n_node, self.n_node))
self.degree_mat = np.zeros((self.n_node, self.n_node))
self.laplacian_mat = np.zeros((self.n_node, self.n_node))
self.b0_mat = np.zeros((self.n_node, self.n_node))
print('Initialize adjacency, degree and Laplacian matrices... Done.')
def build_degree_from_adjacency(self):
for idx in range(self.n_node):
self.degree_mat[idx, idx] = self.adjacency_mat[idx, :].sum()
def build_laplacian_by_adjacency_degree(self):
        self.laplacian_mat = self.degree_mat + self.adjacency_mat  # built as D + A here (the standard graph Laplacian would be D - A)
        print("Finished setting up the Laplacian matrix.")
def get_networkx_graph(self, df, key='k'):
# key: 'k', 'b0'
node1_list = df['Atomid_i'].tolist()
node2_list = df['Atomid_j'].tolist()
weight_list = df[key].tolist()
edges_list = [(node1, node2, {'weight': weight}) for node1, node2, weight in zip(node1_list, node2_list, weight_list)]
G = nx.Graph()
G.add_nodes_from(self.get_node_list_by_id())
G.add_edges_from(edges_list)
return G
def get_node_list_by_id(self):
return [self.atomid_map[name] for name in self.node_list]
def get_networkx_d_pos(self, radius, dist_bw_base, dist_bw_strand):
d_atcg = {'A': {'STRAND1': ADE_Base, 'STRAND2': ADE_Right_Base},
'T': {'STRAND1': THY_Base, 'STRAND2': THY_Right_Base},
'C': {'STRAND1': CYT_Base, 'STRAND2': CYT_Right_Base},
'G': {'STRAND1': GUA_Base, 'STRAND2': GUA_Right_Base}
}
d_strandid_resid = self.get_d_strandid_resid()
d_pos = dict()
x_move = 0
y_move = 0
for strand_id in ['STRAND1', 'STRAND2']:
for resid in range(1, self.n_bp+1):
resname = self.d_seq[strand_id][resid-1]
nucleobase = d_atcg[resname][strand_id](radius)
nucleobase.translate_xy(x_move, y_move)
for name in d_strandid_resid[strand_id][resid]:
atomid = self.atomid_map[name]
atomname = self.atomname_map[name]
d_pos[atomid] = nucleobase.d_nodes[atomname]
if strand_id == 'STRAND1' and (resid != self.n_bp):
y_move += dist_bw_base
elif (strand_id == 'STRAND1') and (resid == self.n_bp):
y_move -= 0
else:
y_move -= dist_bw_base
x_move -= dist_bw_strand
return d_pos
def get_d_strandid_resid(self):
d_strandid_resid = self.initialize_d_strandid_resid()
for name in self.node_list:
strandid = self.strandid_map[name]
resid = self.resid_map[name]
d_strandid_resid[strandid][resid].append(name)
return d_strandid_resid
def initialize_d_strandid_resid(self):
d_strandid_resid = dict()
for strand_id in ['STRAND1', 'STRAND2']:
d_strandid_resid[strand_id] = dict()
for resid in range(1, self.n_bp+1):
d_strandid_resid[strand_id][resid] = list()
return d_strandid_resid
def get_D_by_atomname_strandid(self, sele_name, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
sele_D = np.zeros((self.n_node, self.n_node))
for idx in sele_idx_list:
sele_D[idx, idx] = self.degree_mat[idx, idx]
return sele_D
def get_D_by_atomname_strandid_resname(self, sele_name, sele_strandid, sele_resname):
sele_resid_list = self.get_sele_resid_list_by_resname(sele_resname, sele_strandid)
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
sele_D = np.zeros((self.n_node, self.n_node))
for idx in sele_idx_list:
sele_D[idx, idx] = self.degree_mat[idx, idx]
return sele_D
def get_sele_resid_list_by_resname(self, resname, strandid):
sele_resid_list = list()
central_resids = list(range(4, 19))
#central_resids = list(range(1, 22))
for idx, nt_name in enumerate(self.d_seq[strandid]):
resid = idx + 1
if (resid in central_resids) and (nt_name == resname):
sele_resid_list.append(resid)
return sele_resid_list
def get_A_by_atomname1_atomname2(self, atomname_i, atomname_j, sele_strandid):
sele_idx_list = list()
for resid_i in range(4, 18):
resid_j = resid_i + 1
idx_i = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
sele_idx_list.append((idx_i, idx_j))
sele_A = np.zeros((self.n_node, self.n_node))
for idx_i, idx_j in sele_idx_list:
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower] # make the matrix symmetric
return sele_A
def get_A_by_atomname1_atomname2_by_resnames(self, atomname_i, atomname_j, resname_i, resname_j, sele_strandid):
sele_idx_list = list()
resid_i_list, resid_j_list = self.get_resid_i_resid_j_list(resname_i, resname_j, sele_strandid)
for resid_i, resid_j in zip(resid_i_list, resid_j_list):
idx_i = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
sele_idx_list.append((idx_i, idx_j))
sele_A = np.zeros((self.n_node, self.n_node))
for idx_i, idx_j in sele_idx_list:
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower] # make the matrix symmetric
return sele_A
def get_resid_i_resid_j_list(self, resname_i, resname_j, sele_strandid):
seq = self.d_seq[sele_strandid]
central_resids = range(4, 19)
resid_i_list = list()
resid_j_list = list()
for resid in central_resids:
if (seq[resid-1] == resname_i) and (seq[resid] == resname_j):
resid_i_list.append(resid)
resid_j_list.append(resid+1)
return resid_i_list, resid_j_list
def get_atomidpairs_atomname1_atomname2(self, atomname_i, atomname_j, sele_strandid):
atomidpairs = list()
for resid_i in range(4, 18):
resid_j = resid_i + 1
idx_i = self.atomid_map[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.atomid_map[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
atomidpairs.append((idx_i, idx_j))
return atomidpairs
def get_key_by_atomname_resid_strandid(self, atomname, resid, strandid):
return f'segid {strandid} and resid {resid} and name {atomname}'
def get_filter_by_atomname_strandid(self, sele_name, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
y = np.zeros(self.n_node)
y[sele_idx_list] = 1
return y / np.linalg.norm(y)
def get_filter_by_atomname_for_YR(self, sele_name, sele_resname, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
resid = self.resid_map[name]
if resid not in sele_resid_list:
continue
strandid = self.strandid_map[name]
if strandid != sele_strandid:
continue
resname = self.d_seq[strandid][resid-1]
if resname != sele_resname:
continue
if self.atomname_map[name] == sele_name:
sele_idx_list.append(idx)
y = np.zeros(self.n_node)
y[sele_idx_list] = 1
return y / np.linalg.norm(y)
def eigen_decompose(self):
w, v = np.linalg.eig(self.laplacian_mat)
idx = w.argsort()[::-1] # sort from big to small
self.w = w[idx]
self.v = v[:, idx]
def get_eigenvalue_by_id(self, sele_id):
return self.w[sele_id-1]
def get_eigenvector_by_id(self, sele_id):
return self.v[:,sele_id-1]
def get_qtAq(self, sele_id):
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(self.adjacency_mat, eigvector_sele))
def get_qtDq(self, sele_id):
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(self.degree_mat, eigvector_sele))
def get_qtMq(self, sele_id, M):
### M is customized matrix
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(M, eigvector_sele))
def vmd_show_crd(self):
print(f'vmd -cor {self.npt4_crd}')
def copy_nohydrogen_crd(self):
allsys_root = '/home/yizaochen/codes/dna_rna/all_systems'
srt = path.join(allsys_root, self.host, self.type_na, 'input', 'heavyatoms', f'{self.type_na}.nohydrogen.crd')
dst = self.npt4_crd
copyfile(srt, dst)
print(f'cp {srt} {dst}')
def decide_eigenvector_strand(self, eigv_id):
eigv = self.get_eigenvector_by_id(eigv_id)
dot_product = np.dot(eigv, self.strand1_benchmark)
if np.isclose(dot_product, 0.):
return True #'STRAND2'
else:
return False #'STRAND1'
def set_strand_array(self):
for eigv_id in range(1, self.n_node+1):
if self.decide_eigenvector_strand(eigv_id):
self.strand2_array.append(eigv_id)
else:
self.strand1_array.append(eigv_id)
print(f'Total number of nodes: {self.n_node}')
print(f'There are {len(self.strand1_array)} eigenvectors belonging to STRAND1.')
print(f'There are {len(self.strand2_array)} eigenvectors belonging to STRAND2.')
print(f'Sum of two strands: {len(self.strand1_array)+len(self.strand2_array)}')
def get_lambda_by_strand(self, strandid):
if strandid == 'STRAND1':
return [self.get_eigenvalue_by_id(eigv_id) for eigv_id in self.strand1_array]
else:
return [self.get_eigenvalue_by_id(eigv_id) for eigv_id in self.strand2_array]
def get_eigvector_by_strand(self, strandid, sele_id):
if strandid == 'STRAND1':
real_eigv_id = self.strand1_array[sele_id]
else:
real_eigv_id = self.strand2_array[sele_id]
return self.get_eigenvector_by_id(real_eigv_id), self.get_eigenvalue_by_id(real_eigv_id)
def set_adjacency_by_df(self, df_sele):
idx_i_list = self.__get_idx_list(df_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(df_sele['Atomid_j'])
k_list = df_sele['k'].tolist()
for idx_i, idx_j, k in zip(idx_i_list, idx_j_list, k_list):
self.adjacency_mat[idx_i, idx_j] = k
def set_b0_mat_by_df(self, df_sele):
idx_i_list = self.__get_idx_list(df_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(df_sele['Atomid_j'])
b0_list = df_sele['b0'].tolist()
for idx_i, idx_j, b0 in zip(idx_i_list, idx_j_list, b0_list):
self.b0_mat[idx_i, idx_j] = b0
i_lower = np.tril_indices(self.n_node, -1)
self.b0_mat[i_lower] = self.b0_mat.transpose()[i_lower] # make the matrix symmetric
def set_adjacency_by_d(self, d_sele):
idx_i_list = self.__get_idx_list(d_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(d_sele['Atomid_j'])
k_list = d_sele['k']
for idx_i, idx_j, k in zip(idx_i_list, idx_j_list, k_list):
self.adjacency_mat[idx_i, idx_j] = k
def make_adjacency_symmetry(self):
i_lower = np.tril_indices(self.n_node, -1)
self.adjacency_mat[i_lower] = self.adjacency_mat.transpose()[i_lower] # make the matrix symmetric
def write_show_nodes_tcl(self, tcl_out, colorid=0, vdw_radius=1.0):
serials_str = self.__get_serial_nodes()
f = open(tcl_out, 'w')
f.write('display resize 362 954\n\n')
f.write('mol color ColorID 6\n')
f.write('mol representation Lines 3.000\n')
f.write('mol selection all\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.write(f'mol color ColorID {colorid}\n')
f.write(f'mol representation VDW {vdw_radius:.3f} 12.000\n')
f.write(f'mol selection serial {serials_str}\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.write(f'mol color ColorID 7\n')
f.write(f'mol representation VDW 0.300 12.000\n')
f.write(f'mol selection serial 6 7 8 9\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.close()
print(f'Write tcl to {tcl_out}')
print(f'source {tcl_out}')
def process_lines_for_edges_tcl(self, lines, df_sele, radius=0.05):
u_npt4 = MDAnalysis.Universe(self.npt4_crd, self.npt4_crd)
for atomid1, atomid2 in zip(df_sele['Atomid_i'], df_sele['Atomid_j']):
line = self.__get_draw_edge_line(u_npt4.atoms.positions, atomid1-1, atomid2-1, radius)
lines.append(line)
return lines
def write_lines_to_tcl_out(self, lines, tcl_out):
f = open(tcl_out, 'w')
for line in lines:
f.write(line)
f.close()
print(f'Write tcl to {tcl_out}')
print(f'source {tcl_out}')
def __get_idx_list(self, df_column):
cgname_list = [self.atomid_map_inverse[atomid] for atomid in df_column]
return [self.d_idx[cgname] for cgname in cgname_list]
def __get_serial_nodes(self):
serials_list = [str(self.atomid_map[cgname]) for cgname in self.d_idx.keys()]
return ' '.join(serials_list)
def __get_draw_edge_line(self, positions, atomid1, atomid2, radius):
str_0 = 'graphics 0 cylinder {'
str_1 = f'{positions[atomid1,0]:.3f} {positions[atomid1,1]:.3f} {positions[atomid1,2]:.3f}'
str_2 = '} {'
str_3 = f'{positions[atomid2,0]:.3f} {positions[atomid2,1]:.3f} {positions[atomid2,2]:.3f}'
str_4 = '} '
str_5 = f'radius {radius:.2f}\n'
return str_0 + str_1 + str_2 + str_3 + str_4 + str_5
def build_map(self):
d1 = dict() # key: selction, value: cgname
d2 = dict() # key: cgname, value: selection
d3 = dict()
d4 = dict() # key: cgname, value: atomid
d5 = dict() # key: atomid, value: cgname
d6 = dict() # key: cgname, value: atomname
d7 = dict() # key: cgname, value: strand_id
d8 = dict() # key: cgname, value: resid
d9 = dict() # key: cgname, value: mass
atomid = 1
segid1 = self.u.select_atoms("segid STRAND1")
d3['STRAND1'] = dict()
for i, atom in enumerate(segid1):
cgname = 'A{0}'.format(i+1)
selection = self.__get_selection(atom)
d1[selection] = cgname
d2[cgname] = selection
if atom.resid not in d3['STRAND1']:
d3['STRAND1'][atom.resid] = list()
d3['STRAND1'][atom.resid].append(cgname)
d4[cgname] = atomid
d5[atomid] = cgname
d6[cgname] = atom.name
d7[cgname] = 'STRAND1'
d8[cgname] = atom.resid
d9[cgname] = atom.mass
atomid += 1
segid2 = self.u.select_atoms("segid STRAND2")
d3['STRAND2'] = dict()
for i, atom in enumerate(segid2):
cgname = 'B{0}'.format(i+1)
selection = self.__get_selection(atom)
d1[selection] = cgname
d2[cgname] = selection
if atom.resid not in d3['STRAND2']:
d3['STRAND2'][atom.resid] = list()
d3['STRAND2'][atom.resid].append(cgname)
d4[cgname] = atomid
d5[atomid] = cgname
d6[cgname] = atom.name
d7[cgname] = 'STRAND2'
d8[cgname] = atom.resid
d9[cgname] = atom.mass
atomid += 1
return d1, d2, d3, d4, d5, d6, d7, d8, d9
def __get_selection(self, atom):
return 'segid {0} and resid {1} and name {2}'.format(atom.segid, atom.resid, atom.name)
class Stack(GraphAgent):
def __init__(self, host, rootfolder, time_label='0_5000'):
super().__init__(host, rootfolder, time_label)
self.df_st = self.read_df_st()
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_df_st()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
self.set_b0_mat_by_df(self.df_st)
def build_adjacency_from_df_st(self):
self.set_adjacency_by_df(self.df_st)
self.make_adjacency_symmetry()
def set_benchmark_array(self):
idx_start_strand2 = self.d_idx['B6']
strand1 = np.zeros(self.n_node)
strand2 = np.zeros(self.n_node)
strand1[:idx_start_strand2] = 1.
strand2[idx_start_strand2:] = 1.
self.strand1_benchmark = strand1
self.strand2_benchmark = strand2
def write_show_base_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
lines = self.process_lines_for_edges_tcl(lines, self.df_st, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
def get_df_qTAq_for_vmd_draw(self, eigv_id, strandid):
df = self.df_st
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigvector_by_strand(strandid, eigv_id)[0]
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
def get_sele_A_by_idx(self, atomid_i, atomid_j):
sele_A = np.zeros((self.n_node, self.n_node))
idx_i = self.d_idx[self.atomid_map_inverse[atomid_i]]
idx_j = self.d_idx[self.atomid_map_inverse[atomid_j]]
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower]
return sele_A
def read_df_st(self):
criteria = 1e-3
df1 = get_df_by_filter_st(self.df_all_k, 'st')
mask = (df1['k'] > criteria)
print("Read Dataframe of stacking: df_st")
return df1[mask]
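# Illustrative usage sketch (the rootfolder below is a placeholder path, not a
# location from this repository): build the stacking-only network for one host
# and return its first STRAND1 eigenmode (eigenvalues are sorted in descending
# order).
def _example_stack_mode(rootfolder='/path/to/enm/rootfolder'):
    g_agent = Stack('a_tract_21mer', rootfolder)
    g_agent.pre_process()
    eigvector, eigvalue = g_agent.get_eigvector_by_strand('STRAND1', 0)
    return eigvalue, eigvector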
class StackHB(Stack):
def __init__(self, host, rootfolder, time_label='0_5000'):
super().__init__(host, rootfolder, time_label)
self.hb_agent = HBAgent(host, rootfolder, self.n_bp)
def build_adjacency_from_df_st_df_hb(self):
self.set_adjacency_by_df(self.df_st)
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
self.set_adjacency_by_d(d_hb_new)
self.make_adjacency_symmetry()
def write_show_base_hb_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
lines = self.process_lines_for_edges_tcl(lines, self.df_st, radius=radius)
lines += ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
lines = self.process_lines_for_edges_tcl(lines, d_hb_new, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
class onlyHB(StackHB):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_df_hb()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
def build_adjacency_from_df_hb(self):
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
self.set_adjacency_by_d(d_hb_new)
self.make_adjacency_symmetry()
def write_show_base_hb_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
lines = self.process_lines_for_edges_tcl(lines, d_hb_new, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
def get_df_hb_new(self):
columns = ['Strand_i', 'Resid_i', 'Atomname_i', 'Atomid_i', 'Strand_j', 'Resid_j', 'Atomname_j', 'Atomid_j', 'k']
d_result = dict()
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
cgname_i_list = [self.atomid_map_inverse[atomid_i] for atomid_i in d_hb_new['Atomid_i']]
cgname_j_list = [self.atomid_map_inverse[atomid_j] for atomid_j in d_hb_new['Atomid_j']]
d_result['Strand_i'] = [self.strandid_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Strand_j'] = [self.strandid_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Resid_i'] = [self.resid_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Resid_j'] = [self.resid_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Atomname_i'] = [self.atomname_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Atomname_j'] = [self.atomname_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Atomid_i'] = d_hb_new['Atomid_i']
d_result['Atomid_j'] = d_hb_new['Atomid_j']
d_result['k'] = d_hb_new['k']
df_hb_new = pd.DataFrame(d_result)
criteria = 1e-3
mask = (df_hb_new['k'] > criteria)
df_hb_new = df_hb_new[mask]
return df_hb_new[columns]
def get_df_qTAq_for_vmd_draw(self, eigv_id):
df = self.get_df_hb_new()
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigenvector_by_id(eigv_id)
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
class BackboneRibose(GraphAgent):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_pp_r()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if (atom_type == 'P') or (atom_type == 'S') or (atom_type == 'B'):
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"Thare are {self.n_node} nodes.")
def get_df_backbone_ribose(self):
df_pp2_filter_angle = get_df_by_filter_PP2_angles(get_df_by_filter_PP(self.df_all_k, 'PP2'))
df_pp3 = get_df_by_filter_PP(self.df_all_k, 'PP3')
df_pp_lst = [df_pp2_filter_angle, df_pp3]
df_rb_lst = [get_df_by_filter_RB(self.df_all_k, subcategory) for subcategory in ['RB2', 'RB3']]
df_pb_lst = [get_df_by_filter_PB(self.df_all_k, subcategory) for subcategory in ['PB']]
df_pp_r_rb = pd.concat(df_pp_lst+df_rb_lst+df_pb_lst)
#df_pp_r_rb = pd.concat(df_pp_lst)
criteria = 1e-1
mask = (df_pp_r_rb['k'] > criteria)
return df_pp_r_rb[mask]
def build_adjacency_from_pp_r(self):
df_sele = self.get_df_backbone_ribose()
self.set_adjacency_by_df(df_sele)
self.make_adjacency_symmetry()
self.set_b0_mat_by_df(df_sele)
def get_sele_A_by_idx(self, atomid_i, atomid_j):
sele_A = np.zeros((self.n_node, self.n_node))
idx_i = self.d_idx[self.atomid_map_inverse[atomid_i]]
idx_j = self.d_idx[self.atomid_map_inverse[atomid_j]]
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower]
return sele_A
def get_df_qTAq_for_vmd_draw(self, eigv_id, strandid):
df = self.get_df_backbone_ribose()
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigvector_by_strand(strandid, eigv_id)[0]
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
def set_benchmark_array(self):
idx_start_strand2 = self.d_idx['B1']
strand1 = np.zeros(self.n_node)
strand2 = np.zeros(self.n_node)
strand1[:idx_start_strand2] = 1.
strand2[idx_start_strand2:] = 1.
self.strand1_benchmark = strand1
self.strand2_benchmark = strand2
def write_show_backbone_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
for subcategory in ['PP0', 'PP1', 'PP2', 'PP3']:
df_sele = get_df_by_filter_PP(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
for subcategory in ['R0', 'R1']:
df_sele = get_df_by_filter_R(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
class BB1(GraphAgent):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_pp_r()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if (atom_type == 'P') or (atom_type == 'S') or (atom_type == 'B'):
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"Thare are {self.n_node} nodes.")
def get_df_backbone_ribose(self):
df_pp2_filter_angle = get_df_by_filter_PP2_angles(get_df_by_filter_PP(self.df_all_k, 'PP2'))
df_pp3 = get_df_by_filter_PP(self.df_all_k, 'PP3')
df_pp_lst = [df_pp2_filter_angle, df_pp3]
df_rb_lst = [get_df_by_filter_RB(self.df_all_k, subcategory) for subcategory in ['RB2', 'RB3']]
df_pb_lst = [get_df_by_filter_PB(self.df_all_k, subcategory) for subcategory in ['PB']]
df_pp_r_rb = pd.concat(df_pp_lst+df_rb_lst+df_pb_lst)
df_pp_r_rb = get_df_same_resid(df_pp_r_rb)
criteria = 1e-1
df_pp_r_rb = df_pp_r_rb[df_pp_r_rb['k']>criteria]
f_agent = FilterSB0Agent(self.host, df_pp_r_rb, self.d_seq)
df_final = f_agent.filterSB0_main()
return df_final
def build_adjacency_from_pp_r(self):
df_sele = self.get_df_backbone_ribose()
self.set_adjacency_by_df(df_sele)
self.make_adjacency_symmetry()
self.set_b0_mat_by_df(df_sele)
def get_sele_A_by_idx(self, atomid_i, atomid_j):
sele_A = np.zeros((self.n_node, self.n_node))
idx_i = self.d_idx[self.atomid_map_inverse[atomid_i]]
idx_j = self.d_idx[self.atomid_map_inverse[atomid_j]]
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower]
return sele_A
def get_df_qTAq_for_vmd_draw(self, eigv_id, strandid):
df = self.get_df_backbone_ribose()
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigvector_by_strand(strandid, eigv_id)[0]
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
def set_benchmark_array(self):
idx_start_strand2 = self.d_idx['B1']
strand1 = np.zeros(self.n_node)
strand2 = np.zeros(self.n_node)
strand1[:idx_start_strand2] = 1.
strand2[idx_start_strand2:] = 1.
self.strand1_benchmark = strand1
self.strand2_benchmark = strand2
def write_show_backbone_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
for subcategory in ['PP0', 'PP1', 'PP2', 'PP3']:
df_sele = get_df_by_filter_PP(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
for subcategory in ['R0', 'R1']:
df_sele = get_df_by_filter_R(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
class BB2(GraphAgent):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_pp_r()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if (atom_type == 'P') or (atom_type == 'S'):
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"Thare are {self.n_node} nodes.")
def get_df_backbone_ribose(self):
df_pp2_filter_angle = get_df_by_filter_PP2_angles(get_df_by_filter_PP(self.df_all_k, 'PP2'))
df_pp3 = get_df_by_filter_PP(self.df_all_k, 'PP3')
df_pp_lst = [df_pp2_filter_angle, df_pp3]
df_pp_r_rb = | pd.concat(df_pp_lst) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Birkbeck College. All rights reserved.
#
# Licensed under the MIT license. See file LICENSE for details.
#
# Author(s): <NAME>, <NAME>
import sys
import re
import logging
import pandas as pd
import numpy as np
from scipy.fftpack import rfft, fftfreq
from scipy.signal import butter, lfilter, correlate, freqz
import matplotlib.pylab as plt
import scipy.signal as sig
from numpy import array
from scipy.spatial.distance import euclidean
def load_cloudupdrs_data(filename, convert_times=1000000000.0):
"""
This method loads data in the cloudupdrs format
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, x_0, y_0, z_0
timestamp_1, x_1, y_1, z_1
timestamp_2, x_2, y_2, z_2
.
.
.
timestamp_n, x_n, y_n, z_n
where x, y, z are the components of the acceleration
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from nanoseconds to seconds.
:type convert_times: float
"""
# data_m = pd.read_table(filename, sep=',', header=None)
try:
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False)
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
magnitude_sum_acceleration = \
np.sqrt(data_m[:, 1] ** 2 + data_m[:, 2] ** 2 + data_m[:, 3] ** 2)
data = {'td': time_difference, 'x': data_m[:, 1], 'y': data_m[:, 2], 'z': data_m[:, 3],
'mag_sum_acc': magnitude_sum_acceleration}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc'])
return data_frame
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("load data, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("load data ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
def load_opdc_data(filename, convert_times=1000000000.0):
"""
This method loads data in the OPDC format
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, x_0, y_0, z_0
timestamp_1, x_1, y_1, z_1
timestamp_2, x_2, y_2, z_2
.
.
.
timestamp_n, x_n, y_n, z_n
where x, y, z are the components of the acceleration
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from nanoseconds to seconds.
:type convert_times: float
"""
# data_m = pd.read_table(filename, sep=',', header=None)
try:
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False)
data_m[:, 0] = data_m[:, 0] * 1000000000
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
magnitude_sum_acceleration = \
np.sqrt(data_m[:, 1] ** 2 + data_m[:, 2] ** 2 + data_m[:, 3] ** 2)
data = {'td': time_difference, 'x': data_m[:, 1], 'y': data_m[:, 2], 'z': data_m[:, 3],
'mag_sum_acc': magnitude_sum_acceleration}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc'])
return data_frame
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("load data, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("load data ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
def get_sampling_rate_from_timestamp(d):
# group on minutes as pandas gives us the same second number
# for seconds belonging to different minutes
minutes = d.groupby(d.index.minute)
# get the first minute (0) since we normalised the time above
sampling_rate = d.iloc[minutes.indices[0]].index.second.value_counts().mean()
print('Sampling rate is {} Hz'.format(sampling_rate))
return sampling_rate
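# Note: the estimate above groups the samples by minute, takes the rows of the
# first minute and counts how many rows fall on each second; the mean of those
# counts is returned as the sampling rate in Hz. It assumes a roughly uniform
# sampling rate; very short recordings make the estimate noisier.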
def load_segmented_data(filename):
"""
Helper function to load segmented gait time series data.
:param filename: The full path of the file that contais our data. This should be a comma separated value (csv file).
:type filename: str
:return: The gait time series segmented data, with a x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame
"""
data = pd.read_csv(filename, index_col=0)
    data.index = pd.to_datetime(data.index)
return data
def load_freeze_data(filename):
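    """
    This method loads data in the freeze (freezing of gait) format

    The file is expected to hold space separated values without a header, where
    each row contains a millisecond timestamp, ankle and leg sensor channels,
    x, y, z acceleration components and an annotation column (see the column
    names assigned below). The magnitude of the acceleration (mag_sum_acc) is
    added and the frame is indexed by timestamp.

    :param filename: The path to load data from
    :type filename: string
    :return: The loaded data frame
    :rtype: pandas.DataFrame
    """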
data = pd.read_csv(filename, delimiter=' ', header=None,)
data.columns = ['td', 'ankle_f', 'ankle_v', 'ankle_l', 'leg_f', 'leg_v', 'leg_l', 'x', 'y', 'z', 'anno']
data.td = data.td - data.td[0]
# the dataset specified it uses ms
date_time = pd.to_datetime(data.td, unit='ms')
mag_acc_sum = np.sqrt(data.x ** 2 + data.y ** 2 + data.z ** 2)
data['mag_sum_acc'] = mag_acc_sum
data.index = date_time
    data.index.name = None
sampling_rate = get_sampling_rate_from_timestamp(data)
return data
def load_huga_data(filepath):
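    """
    This method loads data in the huga format: a tab separated file without timestamps

    The sampling rate is inferred from the dataset description (58.82 Hz, see the
    comments below) and used to synthesise a datetime index. Only the acc_lt_x,
    acc_lt_y and acc_lt_z columns are kept, renamed to x, y and z, and the
    magnitude of the acceleration (mag_sum_acc) is added.

    :param filepath: The path to load data from
    :type filepath: string
    :return: The loaded data frame
    :rtype: pandas.DataFrame
    """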
data = pd.read_csv(filepath, delimiter='\t', comment='#')
    # this dataset does not have timestamps, so we infer the sampling rate from the dataset description:
    # 58.82 Hz is 679073 samples divided by 11544 seconds of recording,
    # 1 / 58.82 is therefore the sampling period in seconds,
    # and multiplying by 1000 expresses that period in milliseconds for the date index below
freq = int((1 / 58.82) * 1000)
# this will make that nice date index that we know and love ...
data.index = pd.date_range(start='1970-01-01', periods=data.shape[0], freq='{}ms'.format(freq))
    # the kept columns are hardcoded as we don't need all of the data...
keep = ['acc_lt_x', 'acc_lt_y', 'acc_lt_z']#, 'act']
drop = [c for c in data.columns if c not in keep]
data = data.drop(columns=drop)
# just keep the last letter (x, y and z)
data = data.rename(lambda x: x[-1], axis=1)
mag_acc_sum = np.sqrt(data.x ** 2 + data.y ** 2 + data.z ** 2)
data['mag_sum_acc'] = mag_acc_sum
data['td'] = data.index - data.index[0]
sampling_rate = get_sampling_rate_from_timestamp(data)
return data
def load_physics_data(filename):
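    """
    This method loads data in the physics format: a csv file with time, x, y and z columns

    The time column (in seconds) is converted to a datetime index and the
    magnitude of the acceleration (mag_sum_acc) is added.

    :param filename: The path to load data from
    :type filename: string
    :return: The loaded data frame
    :rtype: pandas.DataFrame
    """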
dd = pd.read_csv(filename)
dd['mag_sum_acc'] = np.sqrt(dd.x ** 2 + dd.y ** 2 + dd.z ** 2)
dd.index = pd.to_datetime(dd.time, unit='s')
    dd.index.name = None
dd = dd.drop(columns=['time'])
sampling_rate = get_sampling_rate_from_timestamp(dd)
return dd
def load_accapp_data(filename, convert_times=1000.0):
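    """
    This method loads data in the accapp format: a tab separated file without a header

    Columns 0 and 5 of the raw file are dropped and the remaining columns are
    interpreted as timestamp, x, y and z. Timestamps are normalised to start at
    zero, used to build the datetime index and converted to seconds in the td
    column. The magnitude of the acceleration (mag_sum_acc) is added.

    :param filename: The path to load data from
    :type filename: string
    :param convert_times: Conversion factor applied to the timestamps. The default converts from milliseconds to seconds.
    :type convert_times: float
    :return: The loaded data frame
    :rtype: pandas.DataFrame
    """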
df = pd.read_csv(filename, sep='\t', header=None)
df.drop(columns=[0, 5], inplace=True)
df.columns = ['td', 'x', 'y', 'z']
df.td = (df.td - df.td[0])
df.index = pd.to_datetime(df.td * convert_times * 1000)
df.td = df.td / convert_times
    df.index.name = None
df['mag_sum_acc'] = np.sqrt(df.x ** 2 + df.y ** 2 + df.z ** 2)
return df
def load_mpower_data(filename, convert_times=1000000000.0):
"""
This method loads data in the `mpower <https://www.synapse.org/#!Synapse:syn4993293/wiki/247859>`_ format
The format is like:
.. code-block:: json
[
{
"timestamp":19298.67999479167,
"x": ... ,
"y": ...,
"z": ...,
},
{...},
{...}
]
:param filename: The path to load data from
:type filename: string
    :param convert_times: Scale factor applied to the raw timestamps when building the datetime index. The default is 10**9 (nanosecond resolution).
:type convert_times: float
"""
raw_data = pd.read_json(filename)
date_times = pd.to_datetime(raw_data.timestamp * convert_times - raw_data.timestamp[0] * convert_times)
time_difference = (raw_data.timestamp - raw_data.timestamp[0])
time_difference = time_difference.values
magnitude_sum_acceleration = \
np.sqrt(raw_data.x.values ** 2 + raw_data.y.values ** 2 + raw_data.z.values ** 2)
data = {'td': time_difference, 'x': raw_data.x.values, 'y': raw_data.y.values,
'z': raw_data.z.values, 'mag_sum_acc': magnitude_sum_acceleration}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc'])
return data_frame
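# Example usage (a minimal sketch; 'mpower_recording.json' is a hypothetical path and the
# file is assumed to follow the mpower JSON layout documented above):
#
#     frame = load_mpower_data('mpower_recording.json')
#     frame.mag_sum_acc.plot()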
def load_finger_tapping_cloudupdrs_data(filename, convert_times=1000.0):
"""
This method loads data in the cloudupdrs format for the finger tapping processor
Usually the data will be saved in a csv file and it should look like this:
    .. code-block:: text
timestamp_0, . , action_type_0, x_0, y_0, . , . , x_target_0, y_target_0
timestamp_1, . , action_type_1, x_1, y_1, . , . , x_target_1, y_target_1
timestamp_2, . , action_type_2, x_2, y_2, . , . , x_target_2, y_target_2
.
.
.
timestamp_n, . , action_type_n, x_n, y_n, . , . , x_target_n, y_target_n
    where data_frame.x and data_frame.y are the components of the tapping position, and
    data_frame.x_target and data_frame.y_target are the coordinates of the target.
:param filename: The path to load data from
:type filename: string
    :param convert_times: Divisor applied to the raw timestamps. The default converts from milliseconds to seconds.
:type convert_times: float
"""
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False, skip_footer=1)
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
data = {'td': time_difference, 'action_type': data_m[:, 2],'x': data_m[:, 3], 'y': data_m[:, 4],
'x_target': data_m[:, 7], 'y_target': data_m[:, 8]}
    data_frame = pd.DataFrame(data, index=date_times,
                              columns=['td', 'action_type', 'x', 'y', 'x_target', 'y_target'])
    return data_frame
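# Example usage (a minimal sketch; 'tapping_session.csv' is a hypothetical path and the
# file is assumed to follow the finger tapping layout documented above):
#
#     taps = load_finger_tapping_cloudupdrs_data('tapping_session.csv')
#     distance_to_target = np.hypot(taps.x - taps.x_target, taps.y - taps.y_target)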