import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
elif f is np.sum and not no_nans:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
mean_x = x.expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_var_std_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
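# A standalone sanity check (illustrative data, not part of the pandas test suite)
# of the identity asserted above: var(x + y) = var(x) + var(y) + 2*cov(x, y),
# hence cov(x, y) == 0.5 * (var(x + y) - var(x) - var(y)) for any consistent ddof.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = rng.normal(size=100)
lhs = np.cov(x, y, ddof=1)[0, 1]
rhs = 0.5 * (np.var(x + y, ddof=1) - np.var(x, ddof=1) - np.var(y, ddof=1))
assert np.isclose(lhs, rhs)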
import pandas as pd
import os
directory = "C:/HRRR"
observed_data = pd.read_csv("C:/Users/<NAME>/Documents/HRSD GIS/Site Data/MMPS_043_no_blanks.csv",
index_col="Datetime", parse_dates=True, infer_datetime_format=True)
# create blank dataframes
mmps043_hrrr = pd.DataFrame()
mmps125_hrrr = pd.DataFrame()
mmps129_hrrr = pd.DataFrame()
mmps153_hrrr = pd.DataFrame()
mmps155_hrrr = pd.DataFrame()
import utils
import json
import os
from pathlib import Path
import pandas as pd
import datetime
# initialize logger
log = utils.set_logging()
starttime = datetime.datetime.now()
log.info('Loading input data')
### loading scraped schools
df_scrape = utils.load_scraped_data()
### loading mappings of shl and scraped schools
path_mapping = Path(__file__) / "../data/matching_shl_data_enriched.json"
with open(path_mapping, encoding="utf8") as json_file:
data = json.load(json_file)
df_shl_enriched = pd.json_normalize(data)
from __future__ import print_function
import os
import datetime
import sys
import pandas as pd
import numpy as np
import requests
import copy
# import pytz
import seaborn as sns
from urllib.parse import quote
import monetio.obs.obs_util as obs_util
"""
NAME: cems_api.py
PGRMMER: <NAME> ORG: ARL
This code was written at the NOAA Air Resources Laboratory.
Python 3
#################################################################
The key and url for the epa api should be stored in a file called
.epaapirc in the $HOME directory.
The contents should be
key: apikey
url: https://api.epa.gov/FACT/1.0/
TO DO
-----
Date is in local time (not daylight savings)
Need to convert to UTC. This will require an extra package or api.
Classes:
----------
EpaApiObject - Base class
EmissionsCall
FacilitiesData
MonitoringPlan
Emissions
CEMS
Functions:
----------
addquarter
get_datelist
findquarter
sendrequest
getkey
"""
def test_end(endtime, current):
# if endtime None return True
if isinstance(endtime, pd._libs.tslibs.nattype.NaTType):
return True
elif not endtime:
return True
# if endtime greater than current return true
elif endtime >= current:
return True
# if endtime less than current time return false
elif endtime < current:
return False
else:
return True
def get_filename(fname, prompt):
"""
Determines whether the file exists. If prompt is True, prompts for a
new filename when the file does not exist.
"""
if fname:
done = False
iii = 0
while not done:
if iii > 2:
done = True
iii += 1
if os.path.isfile(fname):
done = True
elif prompt:
istr = "\n" + fname + " is not a valid name for Facilities Data \n"
istr += "Please enter a new filename \n"
istr += "enter None to load from the api \n"
istr += "enter x to exit program \n"
fname = input(istr)
# print('checking ' + fname)
if fname == "x":
sys.exit()
if fname.lower() == "none":
fname = None
done = True
else:
fname = None
done = True
return fname
# def get_timezone_offset(latitude, longitude):
# """
# uses geonames API
# must store username in the $HOME/.epaapirc file
# geousername: username
# """
# username = getkey()
# print(username)
# username = username["geousername"]
# url = "http://api.geonames.org/timezoneJSON?lat="
# request = url + str(latitude)
# request += "&lng="
# request += str(longitude)
# request += "&username="
# request += username
# try:
# data = requests.get(request)
# except BaseException:
# data = -99
#
# jobject = data.json()
# print(jobject)
# print(data)
# # raw offset should give standard time offset.
# if data == -99:
# return 0
# else:
# offset = jobject["rawOffset"]
# return offset
def getkey():
"""
key and url should be stored in $HOME/.epaapirc
"""
dhash = {}
homedir = os.environ["HOME"]
fname = "/.epaapirc"
if os.path.isfile(homedir + fname):
with open(homedir + fname) as fid:
lines = fid.readlines()
for temp in lines:
temp = temp.split(" ")
dhash[temp[0].strip().replace(":", "")] = temp[1].strip()
else:
dhash["key"] = None
dhash["url"] = None
dhash["geousername"] = None
return dhash
def sendrequest(rqq, key=None, url=None):
"""
Method for sending requests to the EPA API
Inputs :
--------
rqq : string
request string.
Returns:
--------
data : response object
"""
if not key or not url:
keyhash = getkey()
apiurl = keyhash["url"]
key = keyhash["key"]
if key:
# apiurl = "https://api.epa.gov/FACT/1.0/"
rqq = apiurl + rqq + "?api_key=" + key
print("Request: ", rqq)
data = requests.get(rqq)
print("Status Code", data.status_code)
if data.status_code == 429:
print("Too many requests Please Wait before trying again.")
sys.exit()
else:
print("WARNING: your api key for EPA data was not found")
print("Please obtain a key from")
print("https://www.epa.gov/airmarkets/field-audit-checklist_tool-fact-api")
print("The key should be placed in $HOME/.epaapirc")
print("Contents of the file should be as follows")
print("key: apikey")
print("url: https://api.epa.gov/FACT/1.0/")
sys.exit()
return data
def get_lookups():
"""
Request to get lookups - descriptions of various codes.
"""
getstr = "emissions/lookUps"
# rqq = self.apiurl + "emissions/" + getstr
# rqq += "?api_key=" + self.key
data = sendrequest(getstr)
jobject = data.json()
dstr = unpack_response(jobject)
return dstr
# According to lookups MODC values
# 01 primary monitoring system
# 02 backup monitoring system
# 03 alternative monitoring system
# 04 backup monitoring system
# 06 average hour before/hour after
# 07 average hourly
# 21 negative value replaced with 0.
# 08 90th percentile value in Lookback Period
# 09 95th percentile value in Lookback Period
# etc.
# it looks like values between 1-4 ok
# 6-7 probably ok
# higher values should be flagged.
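# A small helper along the lines of the notes above (an illustrative sketch,
# not used elsewhere in this module): codes 1-4 come from actual monitors,
# 6-7 are hour averages, and anything higher is a substituted value.
def flag_suspect_modc(so2modc):
    try:
        code = int(float(so2modc))
    except (TypeError, ValueError):
        return True
    return code > 7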
def quarter2date(year, quarter):
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
dt = datetime.datetime(year, 10, 1)  # quarter 4 starts in October
return dt
def addquarter(rdate):
"""
INPUT
rdate : datetime object
RETURNS
newdate : datetime object
requests for emissions are made per quarter.
Returns first date in the next quarter from the input date.
"""
quarter = findquarter(rdate)
quarter += 1
year = rdate.year
if quarter > 4:
quarter = 1
year += 1
month = 3 * quarter - 2
newdate = datetime.datetime(year, month, 1, 0)
return newdate
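# Example (illustrative): a date anywhere inside a quarter maps to the first
# day of the following quarter.
# >>> addquarter(datetime.datetime(2016, 5, 15))
# datetime.datetime(2016, 7, 1, 0, 0)
# >>> addquarter(datetime.datetime(2016, 11, 2))
# datetime.datetime(2017, 1, 1, 0, 0)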
def get_datelist_sub(r1, r2):
rlist = []
qt1 = findquarter(r1)
yr1 = r1.year
qt2 = findquarter(r2)
yr2 = r2.year
done = False
iii = 0
while not done:
rlist.append(quarter2date(yr1, qt1))
if yr1 > yr2:
done = True
elif yr1 == yr2 and qt1 == qt2:
done = True
qt1 += 1
if qt1 > 4:
qt1 = 1
yr1 += 1
iii += 1
if iii > 30:
break
return rlist
def get_datelist(rdate):
"""
INPUT
rdate : tuple of datetime objects
(start date, end date)
RETURNS:
rdatelist : list of datetimes covering range specified by rdate by quarter.
Return list of first date in each quarter from
startdate to end date.
"""
if isinstance(rdate, list):
rdatelist = get_datelist_sub(rdate[0], rdate[1])
else:
rdatelist = [rdate]
return rdatelist
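# Example (illustrative): a start/end pair spanning February through August
# 2016 expands to the first day of each quarter touched by that range.
# >>> get_datelist([datetime.datetime(2016, 2, 1), datetime.datetime(2016, 8, 1)])
# [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 4, 1, 0, 0),
#  datetime.datetime(2016, 7, 1, 0, 0)]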
def findquarter(idate):
if idate.month <= 3:
qtr = 1
elif idate.month <= 6:
qtr = 2
elif idate.month <= 9:
qtr = 3
elif idate.month <= 12:
qtr = 4
return qtr
def keepcols(df, keeplist):
tcols = df.columns.values
klist = []
for ttt in keeplist:
if ttt in tcols:
# if ttt not in tcols:
# print("NOT IN ", ttt)
# print('Available', tcols)
# else:
klist.append(ttt)
tempdf = df[klist]
return tempdf
def get_so2(df):
"""
drop columns that are not in keep.
"""
keep = [
# "DateHour",
"time local",
# "time",
"OperatingTime",
# "HourLoad",
# "u so2_lbs",
"so2_lbs",
# "AdjustedFlow",
# "UnadjustedFlow",
# "FlowMODC",
"SO2MODC",
"unit",
"stackht",
"oris",
"latitude",
"longitude",
]
df = keepcols(df, keep)
if not df.empty:
df = df[df["oris"] != "None"]
return df
class EpaApiObject:
def __init__(self, fname=None, save=True, prompt=False, fdir=None):
"""
Base class for all classes that send requests to the EPA API.
To avoid sending repeat requests to the api, the default option
is to save the data in a file specified by fname.
fname : str
fdir : str
save : boolean
prompt : boolean
"""
# fname is name of file that data would be saved to.
self.status_code = None
self.df = pd.DataFrame()
self.fname = fname
self.datefmt = "%Y %m %d %H:%M"
if fdir:
self.fdir = fdir
else:
self.fdir = "./apifiles/"
if self.fdir[-1] != "/":
self.fdir += "/"
# returns None if filename does not exist.
# if prompt True then will ask for new filename if does not exist.
fname2 = get_filename(self.fdir + fname, prompt)
self.getstr = self.create_getstr()
# if the file exists load data from it.
getboolean = True
if fname2:
print("Loading from file ", self.fdir + self.fname)
self.fname = fname2
self.df, getboolean = self.load()
elif fname:
self.fname = self.fdir + fname
# if it doesn't load then get it from the api.
# if save is True then save.
if self.df.empty and getboolean:
# get sends request to api and processes data received.
self.df = self.get()
if save:
self.save()
def set_filename(self, fname):
self.fname = fname
def load(self):
chash = {"mid": str, "oris": str}
df = pd.read_csv(self.fname, index_col=[0], converters=chash, parse_dates=True)
# df = pd.read_csv(self.fname, index_col=[0])
return df, True
def save(self):
"""
save to a csv file.
"""
print("saving here", self.fname)
if not self.df.empty:
self.df.to_csv(self.fname, date_format=self.datefmt)
else:
with open(self.fname, "w") as fid:
fid.write("no data")
def create_getstr(self):
# each derived class should have
# its own create_getstr method.
return "placeholder" + self.fname
def printall(self):
data = sendrequest(self.getstr)
jobject = data.json()
rstr = self.getstr + "\n"
rstr += unpack_response(jobject)
return rstr
def return_empty(self):
return pd.DataFrame()
def get_raw_data(self):
data = sendrequest(self.getstr)
if data.status_code != 200:
return self.return_empty()
else:
return data
def get(self):
data = self.get_raw_data()
try:
self.status_code = data.status_code
except:
self.status_code = "None"
try:
jobject = data.json()
except BaseException:
return data
df = self.unpack(jobject)
return df
def unpack(self, data):
# each derived class should have
# its own unpack method.
return pd.DataFrame()
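# A minimal subclass sketch showing the pattern the derived classes below
# follow (the endpoint path and response fields here are hypothetical and
# for illustration only; this class is not used elsewhere).
class ExampleCall(EpaApiObject):
    def __init__(self, oris, fname="Example.csv", save=True, prompt=False):
        # attributes needed by create_getstr must be set before super().__init__,
        # because the base constructor builds the request string.
        self.oris = oris
        super().__init__(fname, save, prompt)

    def create_getstr(self):
        return "facilities/" + str(self.oris)  # hypothetical endpoint path

    def unpack(self, jobject):
        # turn the (assumed) list of records in the json response into a DataFrame
        return pd.DataFrame(jobject.get("data", []))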
class EmissionsCall(EpaApiObject):
"""
class that represents data returned by one emissions/hourlydata call to the restapi.
Attributes
"""
def __init__(self, oris, mid, year, quarter, fname=None, calltype='CEM',
save=True, prompt=False):
self.oris = oris # oris code of facility
self.mid = mid # monitoring location id.
self.year = str(year)
self.quarter = str(quarter)
calltype = calltype.upper().strip()
if calltype=='F23': calltype='AD'
if not fname:
fname = "Emissions." + self.year + ".q" + self.quarter
if calltype=='AD':
fname += '.AD'
fname += "." + str(self.mid) + "." + str(oris) + ".csv"
self.dfall = pd.DataFrame()
self.calltype= calltype
if calltype.upper().strip() == "AD":
self.so2name = "SO2ADReportedSO2MassRate"
elif calltype.upper().strip() == "CEM":
self.so2name = "SO2CEMReportedSO2MassRate"
elif calltype.upper().strip() == "LME":
# this should probably be so2mass??? TO DO.
self.so2name = "LMEReportedSO2Mass"
else:
self.so2name = "SO2CEMReportedSO2MassRate"
self.so2nameB = "UnadjustedSO2"
super().__init__(fname, save, prompt)
# if 'DateHour' in df.columns:
# df = df.drop(['DateHour'], axis=1)
def create_getstr(self):
# for locationID in unitra:
# efile = "efile.txt"
if self.calltype.upper().strip() == "AD":
estr = "emissions/hourlyFuelData/csv"
elif self.calltype.upper().strip() == "LME":
estr = "emissions/hourlyData/csv"
else:
estr = "emissions/hourlyData/csv"
getstr = quote(
"/".join([estr, str(self.oris), str(self.mid), self.year, self.quarter])
)
return getstr
def load(self):
# Emissions call
# datefmt = "%Y %m %d %H:%M"
datefmt = self.datefmt
datefmt2 = "%Y %m %d %H:%M:%S"
chash = {"mid": str, "oris": str, "unit": str}
df = pd.read_csv(self.fname, index_col=[0], converters=chash, parse_dates=False)
# if not df.empty:
if not df.empty:
self.status_code = 200
print("SO2 DATA EXISTS")
temp = df[df['so2_lbs']>0]
if temp.empty:
print('SO2 lbs all zero')
# check for two date formats.
# -----------------------------------------
def newdate(x):
rval = x["time local"]
if isinstance(rval, float):
if np.isnan(rval):
return pd.NaT
rval = rval.replace("-", " ")
rval = rval.strip()
fail = 0
try:
rval = datetime.datetime.strptime(rval, datefmt)
except:
fail = 1
if fail == 1:
try:
rval = datetime.datetime.strptime(rval, datefmt2)
except:
fail = 2
print(self.fname)
print("WARNING: Could not parse date " + rval)
return rval
# -----------------------------------------
df["time local"] = df.apply(newdate, axis=1)
# if 'DateHour' in df.columns:
# df = df.drop(['DateHour'], axis=1)
# df = pd.read_csv(self.fname, index_col=[0])
else:
print("NO SO2 DATA in FILE")
return df, False
def return_empty(self):
return None
def get(self):
data = self.get_raw_data()
try:
self.status_code = data.status_code
except:
self.status_code = None
if data:
df = self.unpack(data)
else:
df = pd.DataFrame()
return df
def unpack(self, data):
logfile = "warnings.emit.txt"
iii = 0
cols = []
tra = []
print('----UNPACK-----------------')
for line in data.iter_lines(decode_unicode=True):
#if iii < 5:
#print('LINE')
#print(line)
# 1. Process First line
temp = line.split(',')
if temp[-1] and self.calltype=='LME':
print(line)
if iii == 0:
tcols = line.split(",")
# add columns for unit id and oris code
tcols.append("unit")
tcols.append("oris")
# add columns for other info (stack height, latitude etc).
# for edata in data2add:
# tcols.append(edata[0])
# 1a write column headers to a file.
verbose = True
if verbose:
with open("headers.txt", "w") as fid:
for val in tcols:
fid.write(val + "\n")
# print('press a key to continue ')
# input()
# 1b check to see if desired emission variable is in the file.
if self.so2name not in tcols:
with open(logfile, "a") as fid:
rstr = "ORIS " + str(self.oris)
rstr += " mid " + str(self.mid) + "\n"
rstr += "NO adjusted SO2 data \n"
if self.so2name not in tcols:
rstr += "NO SO2 data \n"
rstr += "------------------------\n"
fid.write(rstr)
print("--------------------------------------")
print("ORIS " + str(self.oris))
print("UNIT " + str(self.mid) + " no SO2 data")
print(self.fname)
print("--------------------------------------")
# return empty dataframe
return pd.DataFrame()
else:
cols = tcols
print("--------------------------------------")
print("ORIS " + str(self.oris))
print("UNIT " + str(self.mid) + " YES SO2 data")
print(self.fname)
print("--------------------------------------")
# 2. Process rest of lines
else:
lt = line.split(",")
# add input info to line.
lt.append(str(self.mid))
lt.append(str(self.oris))
# for edata in data2add:
# lt.append(edata[1])
tra.append(lt)
iii += 1
# with open(efile, "a") as fid:
# fid.write(line)
# ----------------------------------------------------
df = pd.DataFrame(tra, columns=cols)
df = df.apply(pd.to_numeric, errors="ignore")
df = self.manage_date(df)
if self.calltype == 'AD':
df['SO2MODC'] = -8
if self.calltype == 'LME':
df['SO2MODC'] = -9
df = self.convert_cols(df)
df = self.manage_so2modc(df)
df = get_so2(df)
# the LME data sometimes has duplicate rows.
# causing emissions to be over-estimated.
if self.calltype == 'LME':
df = df.drop_duplicates()
return df
# ----------------------------------------------------------------------------------------------
def manage_date(self, df):
"""DateHour field is originally in string form 4/1/2016 02:00:00 PM
Here, change to a datetime object.
# also need to change to UTC.
# time is local standard time (never daylight savings)
"""
# Using the %I for the hour field and %p for AM/Pm converts time
# correctly.
def newdate(xxx):
fmt = "%m/%d/%Y %I:%M:%S %p"
try:
rdt = datetime.datetime.strptime(xxx["DateHour"], fmt)
except BaseException:
# print("LINE WITH NO DATE :", xxx["DateHour"], ":")
rdt = pd.NaT
return rdt
df["time local"] = df.apply(newdate, axis=1)
df = df.drop(["DateHour"], axis=1)
return df
def manage_so2modc(self,df):
if "SO2CEMSO2FormulaCode" not in df.columns.values:
return df
def checkmodc(formula, so2modc, so2_lbs):
# if F-23 is the formula code and
# so2modc is Nan then change so2modc to -7.
if not so2_lbs or so2_lbs==0:
return so2modc
if so2modc!=0 or not formula:
return so2modc
else:
if 'F-23' in str(formula):
return -7
else:
return -10
df["SO2MODC"] = df.apply(lambda row:
checkmodc(row["SO2CEMSO2FormulaCode"],
row['SO2MODC'],
row['so2_lbs']),
axis=1)
return df
def convert_cols(self, df):
"""
All columns are read in as strings and must be converted to the
appropriate units. NaNs or empty values may be present in the columns.
OperatingTime : fraction of the clock hour during which the unit
combusted any fuel. If unit, stack or pipe did not
operate report 0.00.
"""
# three different ways to convert columns
# def toint(xxx):
# try:
# rt = int(xxx)
# except BaseException:
# rt = -99
# return rt
def tostr(xxx):
try:
rt = str(xxx)
except BaseException:
rt = "none"
return rt
def simpletofloat(xxx):
try:
rt = float(xxx)
except BaseException:
rt = 0
return rt
# calculate lbs of so2 by multiplying rate by operating time.
# checked this with FACTS
def getmass(optime, cname):
# if operating time is zero then emissions are zero.
if float(optime) < 0.0001:
rval = 0
else:
try:
rval = float(cname) * float(optime)
except BaseException:
rval = np.NaN
return rval
def lme_getmass(cname):
try:
rval = float(cname)
except BaseException:
rval = np.NaN
return rval
df["SO2MODC"] = df["SO2MODC"].map(simpletofloat)
# map OperatingTime to a float
df["OperatingTime"] = df["OperatingTime"].map(simpletofloat)
# map Adjusted Flow to a float
#df["AdjustedFlow"] = df["AdjustedFlow"].map(simpletofloat)
# df["oris"] = df["oris"].map(toint)
df["oris"] = df.apply(lambda row: tostr(row["oris"]), axis=1)
# map SO2 data to a float
# if operating time is zero then map to 0 (it is '' in file)
optime = "OperatingTime"
cname = self.so2name
if self.calltype=='LME':
df["so2_lbs"] = df.apply(lambda row: lme_getmass(row[cname]), axis=1)
else:
df["so2_lbs"] = df.apply(lambda row: getmass(row[optime], row[cname]), axis=1)
temp = df[["time local", "so2_lbs", cname, optime]]
temp = df[df["OperatingTime"] > 1.0]
if not temp.empty:
print("Operating Time greater than 1 ")
print(
temp[
["oris", "unit", "OperatingTime", "time local", "so2_lbs", self.so2name]
]
)
# -------------------------------------------------------------
# these were checks to see what values the fields were holding.
# temp is values that are not valid
# temp = temp[temp["OperatingTime"] > 0]
# print("Values that cannot be converted to float")
# print(temp[cname].unique())
# print("MODC ", temp["SO2MODC"].unique())
# ky = "MATSSstartupshutdownflat"
# if ky in temp.keys():
# print("MATSSstartupshutdownflat", temp["MATSStartupShutdownFlag"].unique())
# print(temp['date'].unique())
# ky = "Operating Time"
# if ky in temp.keys():
# print("Operating Time", temp["OperatingTime"].unique())
# if ky in df.keys():
# print("All op times", df["OperatingTime"].unique())
# for line in temp.iterrows():
# print(line)
# -------------------------------------------------------------
return df
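# Worked example of the mass calculation above (illustrative numbers): a
# reported SO2 rate of 120 lbs/hr with OperatingTime 0.75 gives
# 120 * 0.75 = 90 lbs for that clock hour, while OperatingTime 0 gives 0 lbs
# regardless of the reported rate. For LME calls the reported value is
# already a mass, so it is used directly.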
class Emissions:
"""
class that represents data returned by emissions/hourlydata call to the restapi.
Attributes
self.df : DataFrame
Methods
__init__
add
see
https://www.epa.gov/airmarkets/field-audit-checklist-tool-fact-field-references#EMISSION
class that represents data returned by facilities call to the restapi.
# NOTES
# BAF - bias adjustment factor
# MEC - maximum expected concentration
# MPF - maximum potential stack gas flow rate
# monitoring plan specified monitor range.
# FlowPMA % of time flow monitoring system available.
# SO2CEMReportedAdjustedSO2 - average adjusted so2 concentration
# SO2CEMReportedSO2MassRate - average adjusted so2 rate (lbs/hr)
# AdjustedFlow - average volumetric flow rate for the hour. adjusted for
# bias.
# It looks like MassRate is calculated from concentration of SO2 and flow
# rate. So flow rate should be rate of all gasses coming out of stack.
"""
def __init__(self):
self.df = pd.DataFrame()
self.orislist = []
self.unithash = {}
# self.so2name = "SO2CEMReportedAdjustedSO2"
self.so2name = "SO2CEMReportedSO2MassRate"
self.so2nameB = "UnadjustedSO2"
def add(self, oris, locationID, year, quarter, method, logfile="warnings.emit.txt",
):
"""
oris : int
locationID : str
year : int
quarter : int
ifile : str
data2add : list of tuples (str, value)
str is name of column. value to add to column.
"""
if oris not in self.orislist:
self.orislist.append(oris)
if oris not in self.unithash.keys():
self.unithash[oris] = []
self.unithash[oris].append(locationID)
with open(logfile, "w") as fid:
dnow = datetime.datetime.now()
fid.write(dnow.strftime("%Y %m %d %H:%M\n"))
# if locationID == None:
# unitra = self.get_units(oris)
# else:
# unitra = [locationID]
if int(quarter) > 4:
print("Warning: quarter greater than 4")
sys.exit()
# for locationID in unitra:
locationID = str(locationID)
#print('call type :', method)
ec = EmissionsCall(oris, locationID, year, quarter, calltype=method)
df = ec.df
# print('EMISSIONS CALL to DF', year, quarter, locationID)
# print(df[0:10])
if self.df.empty:
self.df = df
elif not df.empty:
self.df = self.df.append(df)
# self.df.to_csv(efile)
return ec.status_code
def save(self):
efile = "efile.txt"
self.df.to_csv(efile)
def merge_facilities(self, dfac):
dfnew = pd.merge(
self.df,
dfac,
how="left",
left_on=["oris", "unit"],
right_on=["oris", "unit"],
)
return dfnew
def plot(self):
import matplotlib.pyplot as plt
df = self.df.copy()
temp1 = df[df["date"].dt.year != 1700]
sns.set()
for unit in df["unit"].unique():
temp = temp1[temp1["unit"] == unit]
temp = temp[temp["SO2MODC"].isin(["01", "02", "03", "04"])]
plt.plot(temp["date"], temp["so2_lbs"], label=str(unit))
print("UNIT", str(unit))
print(temp["SO2MODC"].unique())
# for unit in df["unit"].unique():
# temp = temp1[temp1["unit"] == unit]
# temp = temp[temp["SO2MODC"].isin(
# ["01", "02", "03", "04"]) == False]
# plt.plot(temp["date"], temp["so2_lbs"], label="bad " + str(unit))
# print("UNIT", str(unit))
# print(temp["SO2MODC"].unique())
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
for unit in df["unit"].unique():
temp = temp1[temp1["unit"] == unit]
print("BAF", temp["FlowBAF"].unique())
print("MODC", temp["FlowMODC"].unique())
print("PMA", temp["FlowPMA"].unique())
#plt.plot(temp["date"], temp["AdjustedFlow"], label=str(unit))
plt.show()
class MonitoringPlan(EpaApiObject):
"""
Stack height is converted to meters.
Request to get monitoring plans for oris code and locationID.
locationIDs for an oris code can be found in the FacilitiesData class.
The monitoring plan has locationAttributes which
include the stackHeight, crossAreaExit, crossAreaFlow.
It also includes monitoringSystems which includes
systemTypeDescription (such as SO2 Concentration)
QuarterlySummaries gives so2Mass each quarter.
# currently stack height is the only information
# we want to get from monitoring plan
# request string
# date which indicates quarter of request
# oris
# mid
# stack height
------------------------------------------------------------------------------
6.0 Monitoring Method Data, March 11, 2015
Environmental Protection Agency Monitoring Plan Reporting Instructions -- Page 37
If a location which has an SO2 monitor combusts both high sulfur fuel (e.g., coal
or oil) and a low sulfur fuel, and uses a default SO2 emission rate in conjunction
with Equation F-23 for hours in which very low sulfur fuel is combusted (see
§75.11(e)(1)), report one monitor method record for parameter SO2 with a
monitoring methodology code CEMF23. If only low-sulfur fuel is combusted and the
F-23 calculation is used for every hour, report the SO2 monitoring method as F23.
------------------------------------------------------------------------------
"""
def __init__(self, oris, mid, date, fname="Mplans.csv", save=True, prompt=False):
self.oris = oris # oris code of facility
self.mid = mid # monitoring location id.
self.date = date # date
self.dfall = pd.DataFrame()
self.dfmt="%Y-%m-%dT%H:%M:%S"
super().__init__(fname, save, prompt)
def to_dict(self, unit=None):
if self.df.empty:
return None
if unit:
df = self.df[self.df["name"] == unit]
else:
df = self.df.copy()
try:
mhash = df.reset_index().to_dict("records")
except:
mhash = None
return mhash
def get_stackht(self, unit):
#print(self.df)
df = self.df[self.df["name"] == unit]
#print(df)
stackhts = df['stackht'].unique()
#print('get stackht', stackhts)
return stackhts
def get_method(self, unit, daterange):
# TO DO. pick method code based on dates.
temp = self.df[self.df["name"] == unit]
sdate = daterange[0]
edate = daterange[1]
temp = temp[temp["beginDateHour"] <= sdate]
if temp.empty:
return None
temp["testdate"] = temp.apply(
lambda row: test_end(row["endDateHour"], edate), axis=1
)
temp = temp[temp["testdate"] == True]
method = temp['methodCode'].unique()
return method
def load(self):
# Multiple mplans may be saved to the same csv file.
# so this may return an empty dataframe
# returns empty dataframe and flag to send request.
# return pd.DataFrame(), True
# df = super().load()
chash = {"mid": str, "oris": str, "name": str}
def parsedate(x, sfmt):
if not x:
return pd.NaT
elif x=='None':
return pd.NaT
else:
try:
return pd.to_datetime(x, format=sfmt)
except:
print('time value', x)
return pd.NaT
df = pd.read_csv(self.fname, index_col=[0], converters=chash,
parse_dates=['beginDateHour','endDateHour'],
date_parser=lambda x: parsedate(x, self.dfmt))
self.dfall = df.copy()
df = df[df["oris"] == self.oris]
df = df[df["mid"] == self.mid]
if not df.empty:
self.status_code = 200
return df, True
def save(self):
# do not want to overwrite other mplans in the file.
df = pd.DataFrame()
# BSD 3-Clause License
# Copyright (c) 2022- , <NAME> <<EMAIL>>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, json, itertools
import altair as alt
import datetime as dt
import pandas as pd
# Overall layout
GRAPH_WIDTH = 800
GRAPH_BAR_HEIGHT = 10
GRAPH_BAR_SPACE = 35
GRAPH_BAR_OPACITY = 0.8
# Shift milestone text below marker, date lower
EVENT_DESC_OFFSET_X = 5
EVENT_DESC_OFFSET_Y = 15
EVENT_DATE_DESC_OFFSET_X = 5
EVENT_DATE_DESC_OFFSET_Y = 24
# Only plot 5 quarters: [-1, +4]
RANGE_START = (pd.Timestamp.today() - pd.tseries.offsets.QuarterEnd(2) + pd.Timedelta(days = 1)).strftime('%Y-%m-%d')
RANGE_END = (pd.Timestamp.today() + pd.tseries.offsets.QuarterEnd(4)).strftime('%Y-%m-%d')
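# Example (illustrative): if today were 2022-05-10, the offsets above give
# RANGE_START = '2022-01-01' and RANGE_END = '2023-03-31', i.e. 2022Q1
# through 2023Q1, five full quarters around today.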
# Chart background and text color
CHART_COLOR_BG = '#E7EFF1'
CHART_COLOR_FG = '#000000'
TODAY_COLOR = '#43C59E'
# Chart header (quarter & month) color
QUARTER_COLOR_BG = '#000000'
QUARTER_COLOR_FG = '#FFFFFF'
MONTH_COLOR_BG = itertools.cycle(['#A3B0BB', '#60696B'])
MONTH_COLOR_FG = '#000000'
class ProgramChart():
head_bar_list_q = []
head_bar_list_m = []
chart_today = None
chart_header = []
chart_program = []
def __init__(self, ps):
self.ps = ps
self.name = ps.description
def PrepareQuarterHeader(self):
self.head_bar_list_q = []
for quarter in pd.date_range(RANGE_START, RANGE_END, freq = 'QS'):
q_start = quarter.strftime('%Y-%m-%d')
q_end = (quarter + pd.tseries.offsets.QuarterEnd(1)).strftime('%Y-%m-%d')
q_entry = {'Program': 'Quarter',
'Index': 0,
'Start': pd.to_datetime(q_start),
'End': pd.to_datetime(q_end),
'BGColor': QUARTER_COLOR_BG,
'FGColor': QUARTER_COLOR_FG,
'Description': str(quarter.year) + 'Q' + str(quarter.quarter)
}
self.head_bar_list_q.append(q_entry)
def PrepareMonthHeader(self):
self.head_bar_list_m = []
for month in pd.date_range(RANGE_START, RANGE_END, freq = 'MS'):
m_start = month.strftime('%Y-%m-%d')
m_end = (month + pd.tseries.offsets.MonthEnd(1)).strftime('%Y-%m-%d')
m_entry = {'Program': 'Month',
'Index': 1,
'Start': pd.to_datetime(m_start),
'End': pd.to_datetime(m_end),
'BGColor': next(MONTH_COLOR_BG),
'FGColor': MONTH_COLOR_FG,
'Description': month.strftime('%m')}
self.head_bar_list_m.append(m_entry)
def PrepareChartHeader(self):
self.PrepareQuarterHeader()
self.PrepareMonthHeader()
def PlotMonthQuarterBlock(self):
self.chart_header.append(
alt.Chart(pd.DataFrame(self.head_bar_list_q + self.head_bar_list_m)).mark_bar(
opacity = GRAPH_BAR_OPACITY,
cornerRadius = 5
).encode(
x = alt.X('Start',
scale = alt.Scale(domain = [RANGE_START, RANGE_END]),
axis = alt.Axis(title = self.name,
labelAngle = 0,
format = ('%m'),
tickCount = {'interval': 'month', 'step': 1},
orient = 'top',
labels = False,
ticks = False,
),
timeUnit = 'yearmonth',
),
x2 = 'End',
y = alt.Y('Index:N',
axis = alt.Axis(title = None,
labels = False,
ticks = False
),
sort = alt.SortField(field = 'Index', order = 'ascending')
),
color = alt.Color('BGColor:N', scale = None)
).properties(width = GRAPH_WIDTH)
)
def PlotQuarterText(self):
self.chart_header.append(
alt.Chart(pd.DataFrame(self.head_bar_list_q)).mark_text(dx = 80, align = 'center', color = QUARTER_COLOR_FG).encode(
x = 'Start',
x2 = 'End',
y = 'Index:N',
detail = 'site:N',
text = alt.Text('Description')
)
)
def PlotMonthText(self):
self.chart_header.append(
alt.Chart(pd.DataFrame(self.head_bar_list_m)).mark_text(dx = 25, align = 'center', color = MONTH_COLOR_FG).encode(
x = 'Start',
x2 = 'End',
y = 'Index:N',
detail = 'site:N',
text = alt.Text('Description')
)
)
def PlotChartToday(self):
self.chart_today = alt.Chart(pd.DataFrame({'Date': [pd.Timestamp.today().strftime('%Y-%m-%d')], 'Color': [TODAY_COLOR]})
).mark_rule(strokeWidth = 2, strokeDash=[5, 3]).encode(
x = alt.X('Date:T', scale = alt.Scale(domain = [RANGE_START, RANGE_END])),
color = alt.Color('Color:N', scale = None)
).properties(width = GRAPH_WIDTH)
def PlotChartHeader(self):
self.PlotMonthQuarterBlock()
self.PlotQuarterText()
self.PlotMonthText()
self.PlotChartToday()
def PlotProgramPhase(self):
legend_domain = []
legend_range = []
for p in self.ps.phases:
legend_domain.append(p['Description'])
legend_range.append(p['BGColor'])
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_range_list)).mark_bar(
opacity = GRAPH_BAR_OPACITY,
size = GRAPH_BAR_HEIGHT,
cornerRadius = 5
).encode(
x = alt.X('Start',
scale = alt.Scale(domain = [RANGE_START, RANGE_END]),
axis = alt.Axis(title = '',
labelAngle=0,
format = ('%m'),
tickCount = {'interval': 'month', 'step': 1},
orient = 'top',
labels = False,
ticks = False,
),
),
x2 = 'End',
y = alt.Y('Index:N',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
color = alt.Color('Type:N',
title = 'Phase',
scale = alt.Scale(domain = legend_domain, range = legend_range),
legend = alt.Legend(orient = 'right')
),
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotProgramName(self):
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_name_list)).mark_text(dx = -5, align = 'right').encode(
x = alt.value(0),
y = alt.Y('Index:N',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
color = alt.Color('FGColor:N', scale = None, legend = None),
text = 'Program:N'
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotProgramPhaseDescription(self):
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_range_list)).mark_text(dx = 5, align = 'left').encode(
x = alt.X('Start', scale = alt.Scale(domain=[RANGE_START, RANGE_END])),
y = alt.Y('Index:N',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
color = alt.Color('FGColor:N', scale = None, legend = None),
text = 'Description:N'
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotProgramEvent(self):
legend_domain = []
legend_range = []
for e in self.ps.events:
legend_domain.append(e['Description'])
legend_range.append(e['BGColor'])
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_event_list)).mark_point(filled = True, size = 100, yOffset = 10).encode(
x = alt.X('Date',
scale = alt.Scale(domain=[RANGE_START, RANGE_END])),
y = alt.Y('Index:O',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
shape = alt.Shape('Type:N',
title = 'Milestone',
scale = alt.Scale(domain = legend_domain, range = ['triangle-up'])),
color = alt.Color('Type:N',
title = 'Milestone',
scale = alt.Scale(domain = legend_domain, range = legend_range),
legend = alt.Legend(orient = 'right')
),
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotProgramEventDescription(self):
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_event_list)).mark_text(dx = EVENT_DESC_OFFSET_X, dy = EVENT_DESC_OFFSET_Y, align = 'left').encode(
x = alt.X('Date', scale = alt.Scale(domain = [RANGE_START, RANGE_END])),
y = alt.Y('Index:N',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
color = alt.Color('FGColor:N', scale = None, legend = None),
text = 'Description'
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotProgramEventDate(self):
self.chart_program.append(
alt.Chart(pd.DataFrame(self.ps.program_bar_event_list)).mark_text(dx = EVENT_DATE_DESC_OFFSET_X, dy = EVENT_DATE_DESC_OFFSET_Y, align = 'left').encode(
x = alt.X('Date', scale = alt.Scale(domain=[RANGE_START, RANGE_END])),
y = alt.Y('Index:N',
axis = alt.Axis(title = None, ticks = False, labels = False),
sort = alt.EncodingSortField(field = 'Index', order = 'ascending'),
),
color = alt.Color('FGColor:N', scale = None, legend = None),
text = 'Date_Short'
).properties(width = GRAPH_WIDTH, height = (GRAPH_BAR_SPACE * len(self.ps.schedule_data['Data'])))
)
def PlotChartBody(self):
self.PlotProgramPhase()
self.PlotProgramName()
self.PlotProgramPhaseDescription()
self.PlotProgramEvent()
self.PlotProgramEventDescription()
self.PlotProgramEventDate()
def PlotShow(self):
alt.renderers.enable('altair_viewer')
alt.vconcat(alt.layer(
self.chart_header[0],
self.chart_header[1],
self.chart_header[2],
self.chart_today
),
alt.layer(
self.chart_program[0],
self.chart_program[1],
self.chart_program[2],
self.chart_program[3],
self.chart_program[4],
self.chart_program[5],
self.chart_today
).resolve_scale(
color = 'independent',
shape = 'independent'
)
).configure(
background = CHART_COLOR_BG
).configure_concat(
spacing = 0
).show()
class ProgramSchedule():
schedule_data = json.dumps({'key': 1})
description = 'Program Details'
phases = []
events = []
program_bar_name_list = []
program_bar_range_list = []
program_bar_event_list = []
def __init__(self, name):
self.name = name
def ParseDataFromJSON(self, file):
with open(file) as f:
try:
self.schedule_data = json.load(f)
if 'Data' not in self.schedule_data:
print('JSON %s doesn\'t have valid data for schedule and event' %(file))
exit()
if 'Phase_List' not in self.schedule_data:
print('JSON %s doesn\'t have valid Phase definition' %(file))
exit()
if 'Event_List' not in self.schedule_data:
print('JSON %s doesn\'t have valid Event definition' %(file))
exit()
if 'Description' in self.schedule_data:
self.description = self.schedule_data['Description']
except ValueError as err:
print('Invalid JSON for %s' %(file))
exit()
print(self.description, 'Loaded from JSON')
def PreparePhaseList(self):
self.phases = []
for phase_def in self.schedule_data['Phase_List']:
self.phases.append({'Type': phase_def['Type'], 'Description': phase_def['Description'], 'BGColor': phase_def['BGColor'], 'FGColor': phase_def['FGColor']})
def PrepareEventList(self):
self.events = []
for event_def in self.schedule_data['Event_List']:
self.events.append({'Type': event_def['Type'], 'Description': event_def['Description'], 'BGColor': event_def['BGColor'], 'FGColor': event_def['FGColor']})
def ProcessProgramDetails(self):
self.program_bar_name_list = []
self.program_bar_range_list = []
self.program_bar_event_list = []
for program_data in self.schedule_data['Data']:
# Prepare all program name to be shown on y-axis
self.program_bar_name_list.append({'Program': program_data['Program'], 'Index': program_data['Index'], 'FGColor': CHART_COLOR_FG})
# Prepare phase bar
if 'Phase' in program_data:
for program_phase in program_data['Phase']:
# Decide current phase type
unsupported = True
for p in self.phases:
if (p['Type'] == program_phase['Type']):
unsupported = False
break
if (unsupported):
print('Unsupported Phase type %d for %s' %(program_phase['Type'], program_data['Program']))
exit()
entry = {'Program': program_data['Program'],
'Index': program_data['Index'],
'Type': p['Description'], # Use description as Type for legend label
'Start': ((pd.to_datetime(RANGE_START)) if (pd.to_datetime(program_phase['Start']) < pd.to_datetime(RANGE_START)) else (pd.to_datetime(program_phase['Start']))) if (program_phase['Start'] != '') else (pd.to_datetime(RANGE_START)),
'End' : ((pd.to_datetime(RANGE_END)) if (pd.to_datetime(program_phase['End']) > pd.to_datetime(RANGE_END)) else (pd.to_datetime(program_phase['End']))) if (program_phase['End'] != '') else (pd.to_datetime(RANGE_END)),
'BGColor': p['BGColor'],
'FGColor': p['FGColor'],
'Description': ''
}
if (('End_Today' in program_phase) and program_phase['End_Today']):
entry['End'] = pd.to_datetime(pd.Timestamp.today().strftime('%Y-%m-%d'))
# Hide phase description
if (not program_phase['Hide_Description']):
entry['Description'] = (p['Description'] + str(program_phase['Additional Info'])) if ('Additional Info' in program_phase) else (p['Description'])
self.program_bar_range_list.append(entry)
# Prepare event marker
if 'Event' in program_data:
for program_event in program_data['Event']:
# Decide current event type
unsupported = True
for e in self.events:
if (e['Type'] == program_event['Type']):
unsupported = False
break
if (unsupported):
print('Unsupported Event type %d for %s' %(program_event['Type'], program_data['Program']))
exit()
entry = {'Program': program_data['Program'],
'Index': program_data['Index'],
'Type': e['Description'], # Use description as Type for legend label
'Date': pd.to_datetime(program_event['Date']),
'BGColor': e['BGColor'],
'FGColor': e['FGColor'],
'Description': (e['Description'] + str(program_event['Additional Info'])) if ('Additional Info' in program_event) else (e['Description']),
'Date_Short': str(pd.to_datetime(program_event['Date']))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 10:16:42 2021
@author: tungbioinfo
"""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import time
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import pickle
import os, sys
from joblib import Parallel, delayed
import PCA_Analysis as pca
import RF_Analysis_Multiclass as rfc
import RF_Analysis_Binary as rfb
from Auto_ML_Multiclass import AutoML_classification
###############################################################################
############################## Read data set ##################################
###############################################################################
rumi = pd.read_csv("rumi.csv")
rumi = rumi.drop(rumi[rumi["Depressiongroup"]==1].index, axis=0).reset_index(drop=True)
depre_gr = rumi["Depressiongroup"].apply(lambda x: "BPD"
if x == 2 else "H"
if x == 0 else "MDD")
sex = rumi["Gender_1_male"].apply(lambda x: 0 if x == 2 else 1)
rumi = rumi.drop(columns = ["Depressiongroup", "Gender_1_male"])
rumi = pd.concat([depre_gr, sex, rumi], axis = 1)
rumi = shuffle(rumi).reset_index(drop=True)
rumi_meta = rumi[['MRI_expID', 'MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration']]
rumi_meta = rumi_meta.set_index('MRI_expID')
sns.pairplot(rumi_meta[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'Depressiongroup']],
hue="Depressiongroup")
rumi_meta_bdp = rumi_meta.loc[rumi_meta['Depressiongroup'] == "BPD"]
rumi_meta_mdd = rumi_meta.loc[rumi_meta['Depressiongroup'] == 'MDD']
sns.pairplot(rumi_meta_bdp[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
sns.pairplot(rumi_meta_mdd[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
rumi_region = rumi.drop(columns = ['MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration'])
rumi_region = rumi_region.set_index('MRI_expID')
rumi_region_T = rumi_region.T
rumi_region_bdp = rumi_region.loc[rumi_meta_bdp.index]
rumi_region_mdd = rumi_region.loc[rumi_meta_mdd.index]
y = rumi_meta["Depressiongroup"].apply(lambda x: 0
if x == "MDD" else 1
if x == "BPD" else 2)
class_name = ["MDD", "BPD", 'Healthy']
X_train, X_test, y_train, y_test = train_test_split(rumi_region, y, test_size=0.3, random_state=42)
###############################################################################
######################## Step 1 - Run Auto_ML #################################
###############################################################################
automl = AutoML_classification()
result = automl.fit(X_train, y_train, X_test, y_test)
###############################################################################
################### Step 2 - Run selected models ##############################
###############################################################################
log_best, _, _, _, _ = automl.LogisticRegression(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(log_best, X_train, y_train, X_test, y_test,
model = "Logistics_regression", num_class=3, class_name = class_name)
sgd_best, _, _, _, _ = automl.Stochastic_Gradient_Descent(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(sgd_best, X_train, y_train, X_test, y_test,
model = "Stochastic_Gradient_Descent", num_class=3, class_name = class_name)
rf_best, _, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
evaluate_rf = automl.evaluate_multiclass(rf_best, X_train, y_train, X_test, y_test,
model = "Random Forest", num_class=3, top_features=20, class_name = class_name)
###############################################################################
########## Step 3.1 - Run forward algorithm + Random Forest ###################
###############################################################################
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold, RepeatedStratifiedKFold, RepeatedKFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
base_model_rf = RandomForestClassifier(criterion = "gini", random_state=42)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
all_info.to_csv("CDI_subset_accuracy.csv", index=False)
f.to_csv("CDI_subset.csv")
with open("CDI_models.txt", "wb") as fp:
pickle.dump(all_model, fp)
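# Illustrative follow-up (assumes the files written above exist): reload the
# saved models and pull out the feature subset with the best held-out accuracy.
with open("CDI_models.txt", "rb") as fp:
    saved_models = pickle.load(fp)
best_run = pd.read_csv("CDI_subset_accuracy.csv").iloc[0]
best_features = [feat.strip() for feat in best_run["Feature"].split(",")]
best_rf = saved_models[int(best_run["Num_feature"]) - 1]
print("Best subset size:", len(best_features), "- accuracy:", best_run["Accuracy"])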
###############################################################################
################# Step 3.1 - Run forward algorithm + SGD ######################
###############################################################################
from sklearn.linear_model import SGDClassifier
st_t = dt.now()
n_samples, n_features = X_train.shape
# Loss function
loss = ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"]
penalty = ["l2", "l1", "elasticnet"]
# The higher the value, the stronger the regularization
alpha = np.logspace(-7, -1, 100)
# The Elastic Net mixing parameter
l1_ratio = np.linspace(0, 1, 100)
epsilon = np.logspace(-5, -1, 100)
learning_rate = ["constant", "optimal", "invscaling", "adaptive"]
eta0 = np.logspace(-7, -1, 100)
hyperparameter = {"loss": loss,
"penalty": penalty,
"alpha": alpha,
"l1_ratio": l1_ratio,
"epsilon": epsilon,
"learning_rate": learning_rate,
"eta0": eta0}
model = SGDClassifier(n_jobs = -1)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator = model,
param_distributions = hyperparameter,
cv = 2,
scoring = scoring,
n_iter = n_iter_search,
n_jobs = -1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
###############################################################################
######## Step 4.1 - Run forward algorithm + Random_Forest_regression ##########
###############################################################################
from Auto_ML_Regression import AutoML_Regression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error
import math
y = rumi_meta["RRS_Brooding"]
rumi_region_plus = pd.concat([rumi_meta[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region], axis=1)
#-------
y = rumi_meta_bdp["BDI_Total"]
rumi_region_bdp_plus = pd.concat([rumi_meta_bdp[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_bdp], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_bdp_plus, y, test_size=0.3, random_state=42)
# ------
y = rumi_meta_bdp["BDI_Total"]
rumi_region_mdd_plus = pd.concat([rumi_meta_mdd[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_mdd], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_mdd_plus, y, test_size=0.3, random_state=42)
# ------
ress_BPD_brain = pd.read_csv("BPD_brain.csv", header=None)
ress_BPD_brain.columns = rumi_region.columns
ress_BPD_meta = pd.read_csv("BPD_rrs.csv", header=None)
ress_BPD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_BPD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(BPD_subset, y, test_size=0.3, random_state=42)
# ------
ress_MDD_brain = pd.read_csv("MDD_brain.csv", header=None)
ress_MDD_brain.columns = rumi_region.columns
ress_MDD_meta = pd.read_csv("MDD_rrs.csv", header=None)
ress_MDD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_MDD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_MDD_brain, y, test_size=0.3, random_state=42)
# ------
ress_HC_brain = pd.read_csv("Health_brain.csv", header=None)
ress_HC_brain.columns = rumi_region.columns
ress_HC_meta = pd.read_csv("Health_rrs.csv", header=None)
ress_HC_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_HC_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_HC_brain, y, test_size=0.3, random_state=42)
automl = AutoML_Regression()
result = automl.fit(X_train, y_train, X_test, y_test)
result.to_csv("AutoML_RRS_total_rumi_region_plus.csv", index = False)
ress_BPD_meta["Label"] = "BPD"
ress_MDD_meta["Label"] = "MDD"
ress_HC_meta["Label"] = "HC"
ress = pd.concat([ress_BPD_meta, ress_MDD_meta, ress_HC_meta]).reset_index(drop=True)
sns.pairplot(ress, hue="Label")
#------------------------------------------------------------------------------
automl = AutoML_Regression()
lasso_best, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
lasso_best.fit(X_train, y_train)
y_pred = lasso_best.predict(X_test)
plt.scatter(y_pred, y_test, s=8)
plt.plot([min(y_pred), max(y_pred)], [min(y_test), max(y_test)], '--k')
plt.ylabel('True RRS_total')
plt.xlabel('Predicted RRS_total')
#plt.text(s='Random Forest without Forward varible', x=1,
# y=2, fontsize=12, multialignment='center')
plt.text(min(y_pred), max(y_test) - 5, r'$R^2$ = %.2f' % (r2_score(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 10, r'MSE = %.2f' % (mean_squared_error(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 15, r'Accuracy = %.2f %%' % (100 - 100*mean_absolute_percentage_error(y_test, y_pred)))
#plt.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
errors = abs(y_pred - y_test)
mean_err = np.stack(errors/y_test)
mean_err = mean_err[np.isfinite(mean_err)]
mape = 100 * np.mean(mean_err)
acc = 100 - mape
#------------------------------------------------------------------------------
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
my_cv = RepeatedKFold(n_splits=10, n_repeats=10, random_state=42)
base_model_rf = RandomForestRegressor(criterion = "mse", random_state=42)
n_iter_search = 30
scoring = "neg_mean_squared_error"
n_selected_features = 240
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_mse = []
all_model = []
start = time.time()
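# Greedy forward feature selection: on each pass, try adding every feature not yet in F,
# tune a RandomForest on that candidate subset, and permanently keep the single feature
# whose tuned model yields the lowest test-set MSE.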
while count < n_selected_features:
max_acc = 0
min_err = np.inf
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=my_cv,
cv=5,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
mse = mean_squared_error(y_test, y_pred)
#acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if mse < min_err:
min_err = mse
idx = i
best_model = best_estimator
#errors = abs(y_pred - y_test)
#mean_err = np.stack(errors/y_test)
#mean_err = mean_err[np.isfinite(mean_err)]
mape = mean_absolute_percentage_error(y_test, y_pred)
max_acc = 100 - (100*mape)
F.append(idx)
count += 1
print("The current number of features: {} - MSE: {}".format(count, round(min_err, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
all_mse.append(min_err)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
e = pd.DataFrame(all_mse)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, e, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Mean_Squared_Error', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
all_info.to_csv("RRS_total_subset_RF_accuracy.csv", index=False)
f.to_csv("RRS_total_subset_RF.csv")
with open("RRS_total_RF_models.txt", "wb") as fp:
pickle.dump(all_model, fp)
###############################################################################
############# Step 4.2 - Run forward algorithm + Ridge_regression #############
###############################################################################
from sklearn.linear_model import Ridge, SGDRegressor
from sklearn.linear_model import ElasticNet, LarsCV, Lasso, LassoLars
from sklearn.linear_model import MultiTaskElasticNet, MultiTaskLasso
y = rumi_meta["RRS_Brooding"]
rumi_region_plus = pd.concat([rumi_meta[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_plus, y, test_size=0.3, random_state=42)
alphas = np.logspace(-5, 5, 100)
tuned_parameters = [{"alpha": alphas}]
my_cv = RepeatedKFold(n_splits=10, n_repeats=10, random_state=42)
model = Lasso()
scoring = "neg_mean_squared_error"
n_selected_features = 240
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_mse = []
all_model = []
start = time.time()
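# Same greedy forward-selection loop as above, but the candidate subsets are scored
# with a Lasso model tuned by GridSearchCV over the alpha grid instead of a RandomForest.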
while count < n_selected_features:
max_acc = 0
min_err = np.inf
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
gsearch_cv = GridSearchCV(estimator = model, param_grid = tuned_parameters,
scoring = "neg_mean_squared_error", cv = my_cv, n_jobs=-1)
gsearch_cv.fit(X_train_tmp, y_train)
best_estimator = gsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
mse = mean_squared_error(y_test, y_pred)
#acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if mse < min_err:
min_err = mse
idx = i
best_model = best_estimator
#errors = abs(y_pred - y_test)
#mean_err = np.stack(errors/y_test)
#mean_err = mean_err[np.isfinite(mean_err)]
mape = mean_absolute_percentage_error(y_test, y_pred)
max_acc = 100 - (100*mape)
F.append(idx)
count += 1
print("The current number of features: {} - MSE: {}".format(count, round(min_err, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
all_mse.append(min_err)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
e = pd.DataFrame(all_mse)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, e, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Mean_Squared_Error', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Mean_Squared_Error', ascending=True).reset_index(drop=True)
# =============================================================================
# Test accuracy model
# =============================================================================
all_features_grid = pd.read_csv("RRS_total_subset_RF.csv")
all_info_grid = pd.read_csv("RRS_total_subset_RF_accuracy.csv")
with open("RRS_total_RF_models.txt", "rb") as fp:
load_grid_model = pickle.load(fp)
subset = all_features_grid.drop(columns = ["Unnamed: 0", "All"])
best_model_55 = load_grid_model[25]
subset = subset.iloc[25].dropna()
region_subset = rumi_region_plus[subset]
X_train, X_test, y_train, y_test = train_test_split(region_subset, y, test_size=0.3, random_state=42)
best_model_55.fit(X_train, y_train)
y_pred = best_model_55.predict(X_test)
errors = abs(y_pred - y_test)
mean_err = np.stack(errors/y_test)
mean_err = mean_err[np.isfinite(mean_err)]
mape = 100 * np.mean(mean_err)
acc = 100 - mape
plt.scatter(y_pred, y_test, s=8)
plt.plot([min(y_pred), max(y_pred)], [min(y_test), max(y_test)], '--k')
plt.ylabel('True RRS_total')
plt.xlabel('Predicted RRS_total')
#plt.text(s='Random Forest without Forward variable', x=1,
#         y=2, fontsize=12, multialignment='center')
plt.text(min(y_pred), max(y_test) - 1, r'$R^2$ = %.2f' % (r2_score(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 6, r'MSE = %.2f' % (mean_squared_error(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 11, r'Accuracy = %.2f' % acc)
importances = best_model_55.feature_importances_
indices = np.argsort(importances)[::-1]
feature_tab = pd.DataFrame({"Features": list(X_train.columns),
"Importance": importances})
feature_tab = feature_tab.sort_values("Importance", ascending = False).reset_index(drop=True)
index = feature_tab["Features"].iloc[:26]
importance_desc = feature_tab["Importance"].iloc[:26]
feature_space = []
for i in range(indices.shape[0]-1, -1, -1):
feature_space.append(X_train.columns[indices[i]])
fig, ax = plt.subplots(figsize=(20,20))
ax = plt.gca()
plt.title("Feature importances", fontsize=30)
plt.barh(index, importance_desc, align="center", color="blue", alpha=0.6)
plt.grid(axis="x", color="white", linestyle="-")
plt.xlabel("The average of decrease in impurity", fontsize=20)
plt.ylabel("Features", fontsize=20)
plt.yticks(fontsize=30)
plt.xticks(fontsize=20)
plt.show()
RRS_region_plus = pd.concat([rumi_meta["RRS_Total"], region_subset], axis=1)
RRS_corr = RRS_region_plus.corr(method = "spearman").sort_values(by = "RRS_Total", ascending=False)
RRS_corr = RRS_corr["RRS_Total"]
sns.jointplot(data = RRS_region_plus, y = "RRS_Total", x = "BNA067lPCLA4ll", kind = "reg")
##
BPD_subset = pd.read_csv("BPD_19.csv")
MDD_feature = pd.read_csv("Feature_Importance_MDD.csv")
HC_feature = pd.read_csv("Feature_Importace_HC.csv")
BPD_MDD_feature = MDD_feature[MDD_feature.index.isin(BPD_subset.columns)]
MDD_subset = ress_MDD_brain[MDD_feature.index]
HC_subset = ress_HC_brain[HC_feature.index]
BPD_subset_corr = pd.concat([ress_BPD_meta["RRS_Brooding"], BPD_subset], axis=1)
BPD_subset_corr_ = BPD_subset_corr.corr(method = "spearman").sort_values(by = "RRS_Brooding", ascending=False)
BPD_subset_corr_ = BPD_subset_corr_.drop("RRS_Brooding", axis=0)
BPD_subset_corr_ = BPD_subset_corr_["RRS_Brooding"]
MDD_subset_corr = pd.concat([ress_MDD_meta["RRS_Brooding"], MDD_subset], axis=1)
MDD_subset_corr_ = MDD_subset_corr.corr(method = "spearman").sort_values(by = "RRS_Brooding", ascending=False)
MDD_subset_corr_ = MDD_subset_corr_.drop("RRS_Brooding", axis=0)
MDD_subset_corr_ = MDD_subset_corr_["RRS_Brooding"]
HC_subset_corr = pd.concat([ress_HC_meta["RRS_Brooding"], HC_subset], axis=1)
HC_subset_corr_ = HC_subset_corr.corr(method = "spearman").sort_values(by = "RRS_Brooding", ascending=False)
HC_subset_corr_ = HC_subset_corr_.drop("RRS_Brooding", axis=0)
HC_subset_corr_ = HC_subset_corr_["RRS_Brooding"]
MDD_tha = MDD_feature.loc[['BNA231lThamPFtha', 'BNA242rThaOtha', 'BNA244rThacTtha', 'BNA240rThaPPtha']]
BPD_tha = ress_BPD_brain[['BNA245lThalPFtha', 'BNA243lThacTtha', 'BNA234rThamPMtha', 'BNA236rThaStha']]
HC_tha = HC_feature.loc[["BNA242rThaOtha", "BNA232rThamPFtha", "BNA239lThaPPtha"]]
MDD_cin = MDD_feature.loc[['BNA186rCingA23c', 'BNA218rHippcHipp']]
HC_cin = HC_feature.loc[['BNA187lCingA32sg', 'BNA184rCingA24cd', 'BNA217lHippcHipp']]
MDD_fjg = MDD_feature.loc[['BNA030rIFGA44d']]
tha_3types = | pd.concat([MDD_tha, HC_tha, MDD_cin, HC_cin], axis=0) | pandas.concat |
from time import sleep
import pandas as pd
from Bio import Entrez
# Helper functions
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
def extract(d, keys):
return dict((k, d[k]) for k in keys if k in d)
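# Example usage of the helpers above:
# list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
# extract({'a': 1, 'b': 2}, ['a', 'c']) -> {'a': 1}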
COLUMNS = [
"Id",
"PubDate",
"EPubDate",
"Source",
"AuthorList",
"LastAuthor",
"Title",
"Volume",
"Issue",
"Pages",
"LangList",
"NlmUniqueID",
"ISSN",
"ESSN",
"PubTypeList",
"RecordStatus",
"PubStatus",
"ArticleIds",
"DOI",
"History",
"References",
"HasAbstract",
"PmcRefCount",
"FullJournalName",
"ELocationID",
"SO",
]
# Import document summaries
ds = pd.read_csv(snakemake.input[0], sep=",")
# Get list of pubmed ids
pids = ds["PubMedIds"].tolist()
pids = [i for i in pids if str(i) != "nan"]
pids = [i.split(";") for i in pids]
pids = [i for sublist in pids for i in sublist]
pids = list(set(pids))
# Setup query params
Entrez.email = snakemake.params.get("email", None)
if Entrez.email is None:
raise ValueError("An email must be provided")
api_key = snakemake.params.get("api_key", None)
if api_key is None:
    print(
        "No personal NCBI API key provided: only 3 queries per second are allowed. With a valid API key, 10 queries per second are allowed."
    )
sleeptime = 1 / 3
else:
Entrez.api_key = api_key
sleeptime = 1 / 10
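# NCBI E-utilities allow 3 requests per second without an API key and 10 with one;
# sleeptime is intended as the minimum pause between successive esummary requests.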
db = "pubmed"
batch_size = snakemake.params.get("batch_size", 1)
chunked_ids = chunks(pids, batch_size)
# Fetch and parse summaries
with open(snakemake.output[0], "a") as f:
for chunk in chunked_ids:
summary = Entrez.esummary(db=db, id=",".join(chunk), retmode="xml")
docsums = pd.DataFrame(columns=COLUMNS)
for record in Entrez.parse(summary):
record = extract(record, COLUMNS)
docsums = docsums.append(
{k: v for k, v in record.items()}, ignore_index=True
)
for col in docsums.columns:
if "List" in col:
docsums[col] = docsums[col].apply(
lambda x: ";".join( | pd.Series(x, dtype="str") | pandas.Series |
import pandas as pd
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Risk import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import timedelta, datetime, date
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import resample, shuffle
class RunConfig:
"""This class contains major parameters for running algorithm
"""
# Start date for back-testing
StartDate = date(2009, 5, 1)
# End date for backtesting
EndDate = date(2019, 12, 1)
# Initial Cash
StrategyCash = 200000
# Selection of long only (True) or long-short (False)
LongOnly = True
# Position holding period, days
PositionLifetime = timedelta(days=25)
# Vertical barrier, days (25 or 35 days for QC platform)
VertBarDays = 25
# For running on LEAN locally please provide a path to folder with data
PathToLocalFolder = ""
class QCTickDataStrategy(QCAlgorithm):
""" This algo implements RF triple barrier strategy based on raw tick data.
"""
def __init__(self):
# symbols of assets from MOEX
self.assets_keys = ['AFKS', 'ALRS', 'CHMF', 'GAZP',
'GMKN', 'LKOH', 'MGNT', 'MTSS',
'NVTK', 'ROSN', 'RTKM', 'SBER',
'SNGS', 'TATN', 'VTBR', 'YNDX']
# features to store in dataframe for ML
self.colsU = ['Logret', 'Momone', 'Momtwo', 'Momthree', 'Momfour', 'Momfive',
'Volatilityfifty', 'Volatilitythirtyone', 'Volatilityfifteen',
'Autocorrone', 'Autocorrtwo', 'Autocorrthree', 'Autocorrfour', 'Autocorrfive',
'Logtone', 'Logttwo', 'Logtthree', 'Logtfour', 'Logtfive',
'Bin', 'Side']
# dictionary to store custom asset objects
self.assets = {}
# dictionary to store pandas DataFrames with features for ML
self.features_dict = {}
# dictionary to store ML classifier (RandomForest)
self.clf_dict = {}
# dictionary to store end holding time for each position
self.stop_time_dict = {}
def Initialize(self):
# setting start and end date to run algorithm
self.SetStartDate(RunConfig.StartDate)
self.SetEndDate(RunConfig.EndDate)
# setting initial funds
self.SetCash(RunConfig.StrategyCash)
# creating custom assets from AdvancedBars class for each symbol
self.assets = {i: self.AddData(AdvancedBars, i) for i in self.assets_keys}
# creating empty dataframes for each symbol
self.features_dict = {i: pd.DataFrame(columns=self.colsU) for i in self.assets_keys}
# creating a dictionary of classifiers with initial None value
self.clf_dict = {i: None for i in self.assets_keys}
# creating a dictionary with stoptimes for each symbol
self.stop_time_dict = {i: self.Time for i in self.assets_keys}
# setting a schedule to run ML training
self.Schedule.On(self.DateRules.MonthStart(), self.TimeRules.At(13, 10), Action(self.TrainML))
def OnData(self, data):
"""OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
"""
for k in self.assets_keys:
cond1 = (self.Time > self.stop_time_dict[k]) # if holding time is over
cond2 = self.Portfolio[k].Invested # if the position open
if cond1 and cond2:
self.Log(f", {k}, to liq, {self.Portfolio[k].Quantity}, {self.Portfolio[k].Price}")
self.Liquidate(k) # liquidate open position
for k in self.assets_keys:
if not data.ContainsKey(k):
continue
dat = data[k]
time = dat.Time
# saving data into feature for ML
try:
self.features_dict[k].loc[time] = [dat.Logret, dat.Momone, dat.Momtwo, dat.Momthree, dat.Momfour,
dat.Momfive, dat.Volatilityfifty, dat.Volatilitythirtyone,
dat.Volatilityfifteen,
dat.Autocorrone, dat.Autocorrtwo, dat.Autocorrthree,
dat.Autocorrfour, dat.Autocorrfive,
dat.Logtone, dat.Logttwo, dat.Logtthree, dat.Logtfour, dat.Logtfive,
dat.Bin, dat.Side]
except AttributeError as e:
continue
if self.clf_dict[k] is not None: # feed data into ML if RF classifier was created
# features
X = self.features_dict[k].drop(["Bin"], axis=1).loc[time].values.reshape(1, -1)
# predicted value
y_pred = self.clf_dict[k].predict(X)
                if y_pred > .8:  # trade only when the prediction is confidently above the threshold
# decision of trade direction is based on sma
if dat.Side == 1: # long position
# set new stop time for position holding
self.stop_time_dict[k] = self.Time + RunConfig.PositionLifetime
if not self.Portfolio[k].IsLong: # if no long position invested
if self.Portfolio[k].Invested: # if short position
self.Liquidate(k)
else:
continue
elif dat.Side == -1 and not RunConfig.LongOnly: # if not long only portfolio, and if short side
# set new stop time for position holding
self.stop_time_dict[k] = self.Time + RunConfig.PositionLifetime
if self.Portfolio[k].IsLong:
self.Liquidate(k)
else:
continue
size = dat.Side * min((self.Portfolio.Cash / self.Portfolio.TotalPortfolioValue) * 0.90, 0.1)
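                    # The line above caps each position at 10% of total portfolio value
                    # (and at 90% of the free-cash fraction), signed by the trade direction.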
self.SetHoldings(k, size)
# store trade to log
self.Log(f", {k}, pos, {self.Portfolio[k].Quantity}, {self.Portfolio[k].Price}")
def Balancing(self, X, y):
"""Module to make equial amount of labels. This code is sampled from 'mlfinlab' package
"""
train_df = pd.concat([y, X], axis=1, join='inner')
# Upsample the training data to have a 50 - 50 split
# https://elitedatascience.com/imbalanced-classes
majority = train_df[train_df['Bin'] == 0]
minority = train_df[train_df['Bin'] == 1]
if len(majority) < len(minority):
majority, minority = minority, majority
new_minority = resample(minority,
replace=True, # sample with replacement
n_samples=majority.shape[0], # to match majority class
random_state=42)
train_df = | pd.concat([majority, new_minority]) | pandas.concat |
import os
from typing import List
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook as tqdm
from .dataset import Dataset
DATA_DIR = "data"
NYSE_PRICES = os.path.join(DATA_DIR, 'nyse', 'prices-split-adjusted.csv')
# TODO: Currently only using open for one less dimension
# FEATURES = ['date', 'symbol', 'open', 'close', 'low', 'high', 'volume']
FEATURES = ['date', 'symbol']
TIME_FEATURES = ['open', 'close', 'low', 'high', 'volume', 'movement', 'gspc', 'vix']
DEFAULT_TIME_FEATURES = ['open', 'close']
START_DATE = pd.to_datetime('2010-01-04')
TRAIN_VAL_SPLIT = pd.to_datetime('2014-01-04')
TRAIN_TEST_SPLIT = pd.to_datetime('2016-01-04')
END_DATE = pd.to_datetime('2016-12-30')
FINAL_TEST_SPLIT = pd.to_datetime('2012-12-31')
TEXT_END = pd.to_datetime('2013-11-29') # last article is on 26 but we take the whole week
# These companies have no available stock prices before the FINAL_TEST_SPLIT
COMPANIES_MISSING_IN_TRAIN = [
'ABBV', 'ALLE', 'CFG', 'COTY', 'CSRA', 'DLPH', 'EVHC', 'FB', 'FBHS', 'FTV',
'HCA', 'HPE', 'KHC', 'KMI', 'KORS', 'MNK', 'MPC', 'NAVI', 'NLSN', 'NWS',
'NWSA', 'PSX', 'PYPL', 'QRVO', 'SYF', 'TDG', 'TRIP', 'WLTW', 'WRK', 'XYL', 'ZTS']
COMPANIES_JOINING_DURING_TRAIN = [
'CHTR', # 2010-01-05
'LYB', # 2010-04-28
'GM', # 2010-11-18
]
COMPANIES_JOINING_DURING_TEST = [
'ZTS', # 2013-02-01
'COTY', # 2013-06-13
'MNK', # 2013-06-17
'NWS', 'NWSA', # 2013-06-19
'EVHC', # 2013-08-14
'ALLE', # 2013-11-18
'CFG', 'NAVI', 'QRVO', 'SYF', # 2015-01-02
'WRK', # 2015-06-24
'KHC', 'PYPL', # 2015-07-06
'HPE', # 2015-10-19
'CSRA', # 2015-11-16
'WLTW', # 2016-01-05
'FTV', # 2016-07-05
]
class NyseStocksDataset(Dataset):
def __init__(self, name: str = 'NyseStocksDataset',
file_path: str = NYSE_PRICES,
                 epsilon: float = 0.01,  # a value of 0.004 gives a good class distribution
look_back: int = 7,
forecast_out: int = 1,
features: List[str] = DEFAULT_TIME_FEATURES,
companies: List[int] = None,
load: bool = False,
incl_test: bool = False,
only_test: bool = False):
super().__init__(name)
self.prices = None
self.file_path = file_path
self.file_dir, _ = os.path.split(self.file_path)
self.epsilon = epsilon
self.incl_test = incl_test
self.only_test = only_test
assert look_back > 0
self.look_back = look_back
assert forecast_out > 0
self.forecast_out = forecast_out
self.features = features
self.companies = companies
if load:
self.load()
def load(self):
"""Load data"""
self.logger.debug(
'Reading NYSE stocks data (takes about 43 seconds)...')
prices = | pd.read_csv(self.file_path) | pandas.read_csv |
import torch
import numpy as np
import pandas as pd
import os
import sys
from torchsummary import summary
import torch.nn as nn
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
from matplotlib import cm
import seaborn as sns
sns.set(
font_scale=1.5,
style="whitegrid",
rc={
'text.usetex' : False,
'lines.linewidth': 2
}
)
# sns.set_theme()
# sns.set_style('whitegrid')
import glob
import copy
import math
import models
import random
import torch.optim
import torch
import argparse
import utils
from sklearn.linear_model import LogisticRegression
try:
from tqdm import tqdm
except:
def tqdm(x): return x
"""
Plot the data contained in quant (keys: the names of the experiments) against the reference (contained in stats_ref)
dirname: the output directory name
"""
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True, split=False):
global table_format
col_names = ["experiment", "stat", "set", "layer"]
quant = utils.assert_col_order(quant, col_names, id_vars="var")
keys = list(quant.columns.levels[0].sort_values())
output_root = os.path.join(dirname, f"merge_" + "_".join(keys))
os.makedirs(output_root, exist_ok=True)
    Idx = pd.IndexSlice
    cols_error = Idx[:, 'error', :, :]
N_L = len(quant.columns.unique(level="layer")) # number of hidden layers
# errors = quant["error"]
# losses = quant["loss"]
quant.drop("val", axis=1,level="set", inplace=True, errors='ignore')
quant.drop(("test", "loss"), axis=1, inplace=True, errors='ignore')
if save:
quant.to_csv(os.path.join(output_root, 'merge.csv'))
if stats_ref is not None:
stats_ref.to_csv(os.path.join(output_root, 'stats_ref.csv'))
quant.sort_index(axis=1, inplace=True)
quant.loc[:, cols_error] *= 100 # in %
quant.groupby(level=["experiment", "stat", "set"], axis=1, group_keys=False).describe().to_csv(os.path.join(output_root, 'describe.csv'))
quant_ref = None
Ts = { -1: 0, 0: 0, 1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776, 9: 2.262}
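    # Ts maps degrees of freedom (n-1) to the two-sided 95% Student-t critical value;
    # it is used below to turn standard errors into 95% confidence intervals.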
# quant.where(quant != 0, 6.1*10**(-5), inplace=True)
if args.yscale == "log":
quant_log = np.log10(quant)
# quant_log.loc[:, Idx['B', "loss", :, 10]]
if stats_ref is not None: # the reference to plot against
N_S = len(stats_ref.columns)
quant_ref_merge = pd.DataFrame()
stats_ref.loc[:, "error"] = stats_ref["error"].values * 100
if "layer" in stats_ref.columns.names:
stats_ref.columns = stats_ref.columns.droplevel('layer')
# confidence intervals for the reference loss
quant_ref = stats_ref.agg(['mean', 'count', 'std'])
quant_ref.loc['se'] = quant_ref.loc['std'] / np.sqrt(quant_ref.loc['count']) # standard error
quant_ref.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref.loc['count'], quant_ref.loc['se']) ] # 95% CI
if args.yscale == "log":
quant_ref_log = np.log10(stats_ref).agg(['mean', 'count', 'std'])
quant_ref_log.loc['se'] = quant_ref_log.loc['std'] / np.sqrt(quant_ref_log.loc['count'])
quant_ref_log.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref_log.loc['count'], quant_ref_log.loc['se']) ] # 95% CI
# if args_model is not None:
# else:
xlabels=[str(i) for i in range(N_L)]
logstr = "_log" if args.yscale == "log" else ""
has_ref = quant_ref is not None
# if len(keys) <= 2:
palette=sns.color_palette(n_colors=len(keys))
if not split:
fig, axes = plt.subplots(2, 1, figsize=(4, 8), sharex=False)
# sns.set(font_scale=1,rc={"lines.linewidth":3})
k = 0
# the confidence intervals
df_ci = quant.describe()
df_ci.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
df_ci.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
#confidence intervals for the log plot
if args.yscale == "log":
df_ci_log = quant_log.describe()
df_ci_log.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
df_ci_log.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
#rp.set_axis_labels("layer", "Loss", labelpad=10)
#quant.loc[1, Idx["loss", :, 0]].lineplot(x="layer_ids", y="value", hue="")
for i, stat in enumerate(["loss","error" ]):
for j, setn in enumerate(["train","test"]):
if stat == "loss" and setn=="test":
continue
if stat == "error" and setn=="train":
continue
# axes[k] = rp.axes[j,i]
log_plot = args.yscale == "log" and setn == "train"
if split:
fig, ax = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
else:
ax = axes.flatten()[k]
if log_plot:
df_plot = quant_log.loc[:, Idx[:, stat, setn, :]]
df_ci_plot = df_ci_log
else:
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_ci_plot = df_ci
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=None,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
# linewidth=3.,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
for j, exp in enumerate(keys):
xs =quant.loc[:, Idx[exp, stat, setn, :]].columns.get_level_values('layer').unique()
df_ci_pplot = df_ci_plot.loc[:, Idx[exp, stat, setn, xs]]
ax.fill_between(xs, df_ci_pplot.loc["ymax",:].values, df_ci_pplot.loc["ymin", :].values, color=ax.lines[j].get_color(), alpha=0.3)
# else:
# lp.set_xticklabels(len(xlabels)*[None])
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*"ing", stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "test":
ax.set_ylim(df_plot["value"].min(), df_plot["value"].max())
if log_plot: # set the axis in power of 10 values
ax.get_yaxis().get_major_formatter().set_useMathText(True)
ax.get_yaxis().set_major_formatter(lambda x, pos: "$10^{" + f"{int(x)}" + "}$")
if has_ref:
# data_ref = quant_ref[stat, setn].reset_index()
if not log_plot:
ax.axline((0,quant_ref[stat, setn][0]), (1, quant_ref[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref.loc['mean', (stat, setn)] + quant_ref.loc['ci95', (stat, setn)]#quant_ref.loc['std', (stat, setn)] #
y2 = quant_ref.loc['mean', (stat, setn)] - quant_ref.loc['ci95', (stat, setn)] #quant_ref.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
else:
ax.axline((0,quant_ref_log[stat, setn][0]), (1, quant_ref_log[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref_log.loc['mean', (stat, setn)] + quant_ref_log.loc['ci95', (stat, setn)]#quant_ref_log.loc['std', (stat, setn)] #
y2 = quant_ref_log.loc['mean', (stat, setn)] - quant_ref_log.loc['ci95', (stat, setn)] #quant_ref_log.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
# data_ref.index = pd.Index(range(len(data_ref)))
# ax=ax,
# if setn == "train":
# ax.set_yscale(args.yscale)
if split:
# if k == 1:
labels=keys + has_ref*["ref."]
if setn == "test": # reset the name (not log)
logstr = ""
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
# fig.tight_layout()
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"{setn}_{stat}{logstr}.pdf"), bbox_inches='tight')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
if not split:
labels=keys + has_ref*["ref."]
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
fig.tight_layout()
# plt.margins()
fig.savefig(fname=os.path.join(output_root, f"train_loss_test_error{logstr}.pdf"), bbox_inches='tight')
k=0
# sns.set(font_scale=1,rc={"lines.linewidth":3})
fig, axes = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
for i, stat in enumerate(["error"]):
for j, setn in enumerate(["train"]):
if stat == "loss" and setn=="test":
continue
if stat=="error" and setn=="test":
continue
# axes[k] = rp.axes[j,i]
ax = axes
# df_plot = quant.loc[:, Idx[:, stat, setn, :]].min(axis=0).to_frame(name="value")
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=95,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*'ing', stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "train":
ax.set_yscale(args.yscale)
if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
ax.axline((0,quant_ref[stat, setn][0]), (1,quant_ref[stat, setn][0]), ls=":", zorder=2, c='g')
# data_ref.index = pd.Index(range(len(data_ref)))
# sns.lineplot(
            # data=data_ref, # repeat the dataset N_L times
# ax=ax,
# # x=range(len(data_ref)),
# # y="value",
# # xc np.tile(np.linspace(1, N_L, num=N_L), 2),
# # x='',
# # hue='r',
# # color='red',
# palette=['red'],
# # style='set',
# # x='index',
# # dashes=True,
# legend=False,
# # y="value"
# )
# for ax in ax.lines[-1:]: # the last two
# ax.set_linestyle('--')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
labels=keys + ["ref."]
fig.legend(handles=ax.lines, labels=keys,
#title="Exp.",
loc="upper right", bbox_to_anchor=(0.9,0.9),borderaxespad=0)#, bbox_transform=fig.transFigure)
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"error_train{logstr}.pdf"), bbox_inches='tight')
if "B" in keys:
df_B = quant["B"]
elif "B2" in keys:
df_B = quant["B2"]
else:
return
n_draws = len(df_B.index)
# vary_draw=copy.deepcopy(df_B)
df_B_plot = pd.melt(df_B.reset_index(), id_vars="var")
cp = sns.FacetGrid(
data=df_B_plot,
# hue="experiment",
# hue_order=["A", "B"],
col="stat",
col_order=["loss", "error"],
row="set",
row_order=["train", "test"],
# x="layer",
# y="value",
# kind='line',
# legend="full",
# style='set',
# ci='sd',
# palette=palette,
#style='layer',
# markers=False,
# dashes=True,
#legend_out=True,
# facet_kws={
sharey= False,
sharex= True,
#y="value",
)
styles=['dotted', 'dashed', 'dashdot', 'solid']
# for i_k, k in enumerate([10, 50, 100, 200]):
draws = len(df_B.index)
df_bound = pd.DataFrame(columns=df_B.columns)
# df_bound.columns = df_B.columns
# for k in range(1, draws+1):
# # df_cut = pd.melt(df_B[:k].reset_index(), id_vars="draw")
# df_bound.loc[k, :] = df_B[:k].min(axis=0)
# # idx_min = df_cut.query('stat=="loss"idxmin")
# fig, axes= plt.subplots(2,2,figsize=(12,12), sharex=True)
# for i, stat in enumerate(["loss", "error"]):
# for j, setn in enumerate(["train", "test"]):
# df_bound_plot = df_bound[stat,setn].max(axis=1)
# ax=axes[i,j]
# ax.set_title("{} {}".format(setn.title(), stat.title()))
# sns.lineplot(
# data=df_bound_plot,
# ax=ax,
# )
# # cp.axes[j,i].set_title("{} {}".format(setn.title(), stat.title()))
# plt.savefig(fname=os.path.join(output_root, "comp_draws.pdf"), bbox_inches='tight')
plt.close('all')
# ylabel = stat if stat == "loss" else "error (%)"
# cp.axes[j,i].set_ylabel(ylabel)
# cp.axes[j,i].set_xlabel("layer index l")
# df_cut_plot = pd.melt(df_cut_min.query(f'stat=="{stat}" & set=="{setn}"'))
# if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
# data_ref.index = pd.Index(range(len(data_ref)))
# sns.lineplot(
    # data=df_cut_plot, repeat the dataset N_L times
# ax=cp.axes[j,i],
# x=range(len(data_ref)),
# y="value",
# xc np.tile(np.linspace(1, N_L, num=N_L), 2),
# x='layer',
# hue='r',
# color='red',
# palette=['red'],
# style='set',
# x='index',
# dashes=True,
# legend=False,
# y="value"
# )
# for ax in cp.axes[j,i].lines[-1:]: the last two
# ax.set_linestyle(styles[i_k])
def process_csv(file_csv):
'''Read and process a previously computed result stored inside a checkpoint'''
idx = pd.IndexSlice
quant = | pd.read_csv(file_csv, header=[0,1], index_col=0) | pandas.read_csv |
import copy
import importlib
import itertools
import os
import sys
import warnings
import numpy as np
import pandas as pd
try:
import ixmp
has_ix = True
except ImportError:
has_ix = False
from pyam import plotting
from pyam.logger import logger
from pyam.run_control import run_control
from pyam.utils import (
write_sheet,
read_ix,
read_files,
read_pandas,
format_data,
pattern_match,
years_match,
isstr,
islistable,
cast_years_to_int,
META_IDX,
YEAR_IDX,
REGION_IDX,
IAMC_IDX,
SORT_IDX,
LONG_IDX,
)
from pyam.timeseries import fill_series
class IamDataFrame(object):
"""This class is a wrapper for dataframes following the IAMC format.
It provides a number of diagnostic features (including validation of data,
completeness of variables provided) as well as a number of visualization
and plotting tools.
"""
def __init__(self, data, **kwargs):
"""Initialize an instance of an IamDataFrame
Parameters
----------
data: ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file
an instance of an TimeSeries or Scenario (requires `ixmp`),
or pd.DataFrame or data file with IAMC-format data columns.
A pd.DataFrame can have the required data as columns or index.
Special support is provided for data files downloaded directly from
IIASA SSP and RCP databases. If you run into any problems loading
data, please make an issue at:
https://github.com/IAMconsortium/pyam/issues
"""
# import data from pd.DataFrame or read from source
if isinstance(data, pd.DataFrame):
self.data = format_data(data.copy())
elif has_ix and isinstance(data, ixmp.TimeSeries):
self.data = read_ix(data, **kwargs)
else:
self.data = read_files(data, **kwargs)
# cast year column to `int` if necessary
if not self.data.year.dtype == 'int64':
self.data.year = cast_years_to_int(self.data.year)
# define a dataframe for categorization and other metadata indicators
self.meta = self.data[META_IDX].drop_duplicates().set_index(META_IDX)
self.reset_exclude()
# execute user-defined code
if 'exec' in run_control():
self._execute_run_control()
def __getitem__(self, key):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__getitem__(key)
else:
return self.data.__getitem__(key)
def __setitem__(self, key, value):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__setitem__(key, value)
else:
return self.data.__setitem__(key, value)
def __len__(self):
return self.data.__len__()
def _execute_run_control(self):
for module_block in run_control()['exec']:
fname = module_block['file']
functions = module_block['functions']
dirname = os.path.dirname(fname)
if dirname:
sys.path.append(dirname)
module = os.path.basename(fname).split('.')[0]
mod = importlib.import_module(module)
for func in functions:
f = getattr(mod, func)
f(self)
def head(self, *args, **kwargs):
"""Identical to pd.DataFrame.head() operating on data"""
return self.data.head(*args, **kwargs)
def tail(self, *args, **kwargs):
"""Identical to pd.DataFrame.tail() operating on data"""
return self.data.tail(*args, **kwargs)
def models(self):
"""Get a list of models"""
return pd.Series(self.meta.index.levels[0])
def scenarios(self):
"""Get a list of scenarios"""
return pd.Series(self.meta.index.levels[1])
def regions(self):
"""Get a list of regions"""
return pd.Series(self.data['region'].unique(), name='region')
def variables(self, include_units=False):
"""Get a list of variables
Parameters
----------
include_units: boolean, default False
include the units
"""
if include_units:
return self.data[['variable', 'unit']].drop_duplicates()\
.reset_index(drop=True).sort_values('variable')
else:
return pd.Series(self.data.variable.unique(), name='variable')
def append(self, other, ignore_meta_conflict=False, inplace=False,
**kwargs):
"""Append any castable object to this IamDataFrame.
        Columns in `other.meta` that are not in `self.meta` are always merged;
        duplicate region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(
other.meta.loc[intersect, cols]):
conflict_idx = (
pd.concat([ret.meta.loc[intersect, cols],
other.meta.loc[intersect, cols]]
).drop_duplicates()
.index.drop_duplicates()
)
msg = 'conflict in `meta` for scenarios {}'.format(
[i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg)
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer',
left_index=True, right_index=True)
# join other.meta for new scenarios
if not diff.empty:
            # sorting not supported by `pd.DataFrame.append()` prior to pandas 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)
# append other.data (verify integrity for no duplicates)
ret.data.set_index(LONG_IDX, inplace=True)
other.data.set_index(LONG_IDX, inplace=True)
ret.data = ret.data.append(other.data, verify_integrity=True)\
.reset_index(drop=False)
if not inplace:
return ret
def pivot_table(self, index, columns, values='value',
aggfunc='count', fill_value=None, style=None):
"""Returns a pivot table
Parameters
----------
index: str or list of strings
rows for Pivot table
columns: str or list of strings
columns for Pivot table
values: str, default 'value'
dataframe column to aggregate or count
aggfunc: str or function, default 'count'
function used for aggregation,
accepts 'count', 'mean', and 'sum'
fill_value: scalar, default None
value to replace missing values with
style: str, default None
output style for pivot table formatting
accepts 'highlight_not_max', 'heatmap'
"""
index = [index] if isstr(index) else index
columns = [columns] if isstr(columns) else columns
df = self.data
# allow 'aggfunc' to be passed as string for easier user interface
if isstr(aggfunc):
if aggfunc == 'count':
df = self.data.groupby(index + columns, as_index=False).count()
fill_value = 0
elif aggfunc == 'mean':
df = self.data.groupby(index + columns, as_index=False).mean()\
.round(2)
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
elif aggfunc == 'sum':
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
df = df.pivot_table(values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value)
return df
def interpolate(self, year):
"""Interpolate missing values in timeseries (linear interpolation)
Parameters
----------
year: int
year to be interpolated
"""
df = self.pivot_table(index=IAMC_IDX, columns=['year'],
values='value', aggfunc=np.sum)
# drop year-rows where values are already defined
if year in df.columns:
df = df[np.isnan(df[year])]
fill_values = df.apply(fill_series,
raw=False, axis=1, year=year)
fill_values = fill_values.dropna().reset_index()
fill_values = fill_values.rename(columns={0: "value"})
fill_values['year'] = year
self.data = self.data.append(fill_values, ignore_index=True)
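        # Illustrative example: if a scenario only has values for 2010 and 2020,
        # `df.interpolate(2015)` appends a 2015 row with the linearly interpolated value.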
def as_pandas(self, with_metadata=False):
"""Return this as a pd.DataFrame
Parameters
----------
with_metadata : bool, default False
if True, join data with existing metadata
"""
df = self.data
if with_metadata:
df = (df
.set_index(META_IDX)
.join(self.meta)
.reset_index()
)
return df
def timeseries(self):
"""Returns a dataframe in the standard IAMC format
"""
return (
self.data
.pivot_table(index=IAMC_IDX, columns='year')
.value # column name
.rename_axis(None, axis=1)
)
def reset_exclude(self):
"""Reset exclusion assignment for all scenarios to `exclude: False`"""
self.meta['exclude'] = False
def set_meta(self, meta, name=None, index=None):
"""Add metadata columns as pd.Series, list or value (int/float/str)
Parameters
----------
meta: pd.Series, list, int, float or str
column to be added to metadata
(by `['model', 'scenario']` index if possible)
name: str, optional
meta column name (defaults to meta pd.Series.name);
either a meta.name or the name kwarg must be defined
index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
"""
if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
raise ValueError('Must pass a name or use a named pd.Series')
# check if meta has a valid index and use it for further workflow
if hasattr(meta, 'index') and hasattr(meta.index, 'names') \
and set(META_IDX).issubset(meta.index.names):
index = meta.index
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index
if not isinstance(index, pd.MultiIndex):
raise ValueError('index cannot be coerced to pd.MultiIndex')
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("non-unique ['model', 'scenario'] index!")
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
meta.name = name = name or meta.name
# reduce index dimensions to model-scenario only
meta = (
meta
.reset_index()
.reindex(columns=META_IDX + [name])
.set_index(META_IDX)
)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
error = "adding metadata for non-existing scenarios '{}'!"
raise ValueError(error.format(diff))
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name])
def categorize(self, name, value, criteria,
color=None, marker=None, linestyle=None):
"""Assign scenarios to a category according to specific criteria
or display the category assignment
Parameters
----------
name: str
category column name
value: str
category identifier
criteria: dict
dictionary with variables mapped to applicable checks
('up' and 'lo' for respective bounds, 'year' for years - optional)
color: str
assign a color to this category for plotting
marker: str
assign a marker to this category for plotting
linestyle: str
assign a linestyle to this category for plotting
"""
# add plotting run control
for kind, arg in [('color', color), ('marker', marker),
('linestyle', linestyle)]:
if arg:
run_control().update({kind: {name: {value: arg}}})
# find all data that matches categorization
rows = _apply_criteria(self.data, criteria,
in_range=True, return_test='all')
idx = _meta_idx(rows)
if len(idx) == 0:
logger().info("No scenarios satisfy the criteria")
return # EXIT FUNCTION
# update metadata dataframe
self._new_meta_column(name)
self.meta.loc[idx, name] = value
msg = '{} scenario{} categorized as `{}: {}`'
logger().info(msg.format(len(idx), '' if len(idx) == 1 else 's',
name, value))
def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set to value `np.nan`"""
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name))
if name not in self.meta:
self.meta[name] = np.nan
def require_variable(self, variable, unit=None, year=None,
exclude_on_fail=False):
"""Check whether all scenarios have a required variable
Parameters
----------
variable: str
required variable
unit: str, default None
name of unit (optional)
        year: int or list, default None
            year(s) to check (optional)
        exclude_on_fail: bool, default False
            flag scenarios missing the required variables as `exclude: True`
"""
criteria = {'variable': variable}
if unit:
criteria.update({'unit': unit})
if year:
criteria.update({'year': year})
keep = _apply_filters(self.data, self.meta, criteria)
idx = self.meta.index.difference(_meta_idx(self.data[keep]))
n = len(idx)
if n == 0:
logger().info('All scenarios have the required variable `{}`'
.format(variable))
return
msg = '{} scenario does not include required variable `{}`' if n == 1 \
else '{} scenarios do not include required variable `{}`'
if exclude_on_fail:
self.meta.loc[idx, 'exclude'] = True
msg += ', marked as `exclude: True` in metadata'
logger().info(msg.format(n, variable))
return pd.DataFrame(index=idx).reset_index()
def validate(self, criteria={}, exclude_on_fail=False):
"""Validate scenarios using criteria on timeseries values
Parameters
----------
criteria: dict
dictionary with variable keys and check values
('up' and 'lo' for respective bounds, 'year' for years)
exclude_on_fail: bool, default False
flag scenarios failing validation as `exclude: True`
"""
df = _apply_criteria(self.data, criteria, in_range=False)
if not df.empty:
            msg = '{} of {} data points do not satisfy the criteria'
logger().info(msg.format(len(df), len(self.data)))
if exclude_on_fail and len(df) > 0:
self._exclude_on_fail(df)
return df
def rename(self, mapping, inplace=False):
"""Rename and aggregate column entries using `groupby.sum()` on values.
When renaming models or scenarios, the uniqueness of the index must be
maintained, and the function will raise an error otherwise.
Parameters
----------
mapping: dict
for each column where entries should be renamed, provide current
name and target name
{<column name>: {<current_name_1>: <target_name_1>,
<current_name_2>: <target_name_2>}}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for col, _mapping in mapping.items():
if col in ['model', 'scenario']:
index = pd.DataFrame(index=ret.meta.index).reset_index()
index.loc[:, col] = index.loc[:, col].replace(_mapping)
if index.duplicated().any():
raise ValueError('Renaming to non-unique {} index!'
.format(col))
ret.meta.index = index.set_index(META_IDX).index
elif col not in ['region', 'variable', 'unit']:
raise ValueError('Renaming by {} not supported!'.format(col))
ret.data.loc[:, col] = ret.data.loc[:, col].replace(_mapping)
ret.data = ret.data.groupby(LONG_IDX).sum().reset_index()
if not inplace:
return ret
def convert_unit(self, conversion_mapping, inplace=False):
"""Converts units based on provided unit conversion factors
Parameters
----------
conversion_mapping: dict
for each unit for which a conversion should be carried out,
provide current unit and target unit and conversion factor
{<current unit>: [<target unit>, <conversion factor>]}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for current_unit, (new_unit, factor) in conversion_mapping.items():
factor = pd.to_numeric(factor)
where = ret.data['unit'] == current_unit
ret.data.loc[where, 'value'] *= factor
ret.data.loc[where, 'unit'] = new_unit
if not inplace:
return ret
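    # Illustrative usage: df.convert_unit({'Mt CO2/yr': ['Gt CO2/yr', 0.001]})
    # rescales every value reported in 'Mt CO2/yr' and relabels its unit.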
def check_aggregate(self, variable, components=None, units=None,
exclude_on_fail=False, multiplier=1, **kwargs):
"""Check whether the timeseries data match the aggregation
of components or sub-categories
Parameters
----------
variable: str
variable to be checked for matching aggregation of sub-categories
components: list of str, default None
list of variables, defaults to all sub-categories of `variable`
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
multiplier: number, default 1
factor when comparing variable and sum of components
kwargs: passed to `np.isclose()`
"""
# default components to all variables one level below `variable`
if components is None:
components = self.filter(variable='{}|*'.format(variable),
level=0).variables()
if not len(components):
msg = '{} - cannot check aggregate because it has no components'
logger().info(msg.format(variable))
return
# filter and groupby data, use `pd.Series.align` for matching index
df_variable, df_components = (
_aggregate_by_variables(self.data, variable, units)
.align(_aggregate_by_variables(self.data, components, units))
)
# use `np.isclose` for checking match
diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
**kwargs)]
if len(diff):
msg = '{} - {} of {} data points are not aggregates of components'
logger().info(msg.format(variable, len(diff), len(df_variable)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[variable], names=['variable'])
return diff.unstack().rename_axis(None, axis=1)
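    # Illustrative usage: df.check_aggregate('Emissions|CO2') returns the data points
    # where 'Emissions|CO2' differs from the sum of its components 'Emissions|CO2|*',
    # or None if everything matches.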
def check_aggregate_regions(self, variable, region='World',
components=None, units=None,
exclude_on_fail=False, **kwargs):
"""Check whether the region timeseries data match the aggregation
of components
Parameters
----------
variable: str
variable to be checked for matching aggregation of components data
region: str
region to be checked for matching aggregation of components data
components: list of str, default None
list of regions, defaults to all regions except region
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
kwargs: passed to `np.isclose()`
"""
var_df = self.filter(variable=variable, level=0)
if components is None:
components = var_df.filter(region=region, keep=False).regions()
if not len(components):
msg = (
'{} - cannot check regional aggregate because it has no '
'regional components'
)
logger().info(msg.format(variable))
return None
# filter and groupby data, use `pd.Series.align` for matching index
df_region, df_components = (
_aggregate_by_regions(var_df.data, region, units)
.align(_aggregate_by_regions(var_df.data, components, units))
)
df_components.index = df_components.index.droplevel(
"variable"
)
# Add in variables that are included in region totals but which
# aren't included in the regional components.
# For example, if we are looking at World and Emissions|BC, we need
# to add aviation and shipping to the sum of Emissions|BC for each
# of World's regional components to do a valid check.
different_region = components[0]
variable_components = self.filter(
variable="{}|*".format(variable)
).variables()
for var_to_add in variable_components:
var_rows = self.data.variable == var_to_add
region_rows = self.data.region == different_region
var_has_regional_info = (var_rows & region_rows).any()
if not var_has_regional_info:
df_var_to_add = self.filter(
region=region, variable=var_to_add
).data.groupby(REGION_IDX).sum()['value']
df_var_to_add.index = df_var_to_add.index.droplevel("variable")
if len(df_var_to_add):
df_components = df_components.add(df_var_to_add,
fill_value=0)
df_components = pd.concat([df_components], keys=[variable],
names=['variable'])
# use `np.isclose` for checking match
diff = df_region[~np.isclose(df_region, df_components, **kwargs)]
if len(diff):
msg = (
'{} - {} of {} data points are not aggregates of regional '
'components'
)
logger().info(msg.format(variable, len(diff), len(df_region)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[region], names=['region'])
return diff.unstack().rename_axis(None, axis=1)
def check_internal_consistency(self, **kwargs):
"""Check whether the database is internally consistent
We check that all variables are equal to the sum of their sectoral
components and that all the regions add up to the World total. If
the check is passed, None is returned, otherwise a dictionary of
inconsistent variables is returned.
Note: at the moment, this method's regional checking is limited to
checking that all the regions sum to the World region. We cannot
make this more automatic unless we start to store how the regions
relate, see
[this issue](https://github.com/IAMconsortium/pyam/issues/106).
Parameters
----------
kwargs: passed to `np.isclose()`
"""
inconsistent_vars = {}
for variable in self.variables():
diff_agg = self.check_aggregate(variable, **kwargs)
if diff_agg is not None:
inconsistent_vars[variable + "-aggregate"] = diff_agg
diff_regional = self.check_aggregate_regions(variable, **kwargs)
if diff_regional is not None:
inconsistent_vars[variable + "-regional"] = diff_regional
return inconsistent_vars if inconsistent_vars else None
def _exclude_on_fail(self, df):
"""Assign a selection of scenarios as `exclude: True` in meta"""
idx = df if isinstance(df, pd.MultiIndex) else _meta_idx(df)
self.meta.loc[idx, 'exclude'] = True
logger().info('{} non-valid scenario{} will be excluded'
.format(len(idx), '' if len(idx) == 1 else 's'))
def filter(self, filters=None, keep=True, inplace=False, **kwargs):
"""Return a filtered IamDataFrame (i.e., a subset of current data)
Parameters
----------
keep: bool, default True
keep all scenarios satisfying the filters (if True) or the inverse
inplace: bool, default False
if True, do operation inplace and return None
filters by kwargs or dict (deprecated):
The following columns are available for filtering:
- metadata columns: filter by category assignment in metadata
- 'model', 'scenario', 'region', 'variable', 'unit':
string or list of strings, where ``*`` can be used as a wildcard
- 'level': the maximum "depth" of IAM variables (number of '|')
              (excluding the strings given in the 'variable' argument)
- 'year': takes an integer, a list of integers or a range
note that the last year of a range is not included,
so ``range(2010,2015)`` is interpreted as ``[2010, ..., 2014]``
- 'regexp=True' overrides pseudo-regexp syntax in `pattern_match()`
"""
if filters is not None:
            warnings.warn(
                '`filters` keyword argument in `filter()` is deprecated and will be removed in the next release')
kwargs.update(filters)
_keep = _apply_filters(self.data, self.meta, kwargs)
_keep = _keep if keep else ~_keep
ret = copy.deepcopy(self) if not inplace else self
ret.data = ret.data[_keep]
idx = pd.MultiIndex.from_tuples(
pd.unique(list(zip(ret.data['model'], ret.data['scenario']))),
names=('model', 'scenario')
)
if len(idx) == 0:
logger().warning('Filtered IamDataFrame is empty!')
ret.meta = ret.meta.loc[idx]
if not inplace:
return ret
def col_apply(self, col, func, *args, **kwargs):
"""Apply a function to a column
Parameters
----------
col: string
column in either data or metadata
func: functional
function to apply
"""
if col in self.data:
self.data[col] = self.data[col].apply(func, *args, **kwargs)
else:
self.meta[col] = self.meta[col].apply(func, *args, **kwargs)
def _to_file_format(self):
"""Return a dataframe suitable for writing to a file"""
df = self.timeseries().reset_index()
df = df.rename(columns={c: str(c).title() for c in df.columns})
return df
def to_csv(self, path, index=False, **kwargs):
"""Write data to a csv file
Parameters
----------
index: boolean, default False
write row names (index)
"""
self._to_file_format().to_csv(path, index=False, **kwargs)
def to_excel(self, path=None, writer=None, sheet_name='data', index=False,
**kwargs):
"""Write timeseries data to Excel using the IAMC template convention
(wrapper for `pd.DataFrame.to_excel()`)
Parameters
----------
        path: string, optional
            file path; specify either `path` or `writer`, not both
        writer: ExcelWriter object, optional
            existing ExcelWriter; specify either `path` or `writer`, not both
sheet_name: string, default 'data'
name of the sheet that will contain the (filtered) IamDataFrame
index: boolean, default False
write row names (index)
"""
if (path is None and writer is None) or \
(path is not None and writer is not None):
raise ValueError('Only one of path and writer must have a value')
if writer is None:
writer = pd.ExcelWriter(path)
self._to_file_format().to_excel(writer, sheet_name=sheet_name,
index=index, **kwargs)
def export_metadata(self, path):
"""Export metadata to Excel
Parameters
----------
path: string
path/filename for xlsx file of metadata export
"""
writer = pd.ExcelWriter(path)
write_sheet(writer, 'meta', self.meta, index=True)
writer.save()
def load_metadata(self, path, *args, **kwargs):
"""Load metadata exported from `pyam.IamDataFrame` instance
Parameters
----------
path: string
xlsx file with metadata exported from `pyam.IamDataFrame` instance
"""
if not os.path.exists(path):
raise ValueError("no metadata file '" + path + "' found!")
if path.endswith('csv'):
df = pd.read_csv(path, *args, **kwargs)
else:
xl = pd.ExcelFile(path)
if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs:
kwargs['sheet_name'] = 'meta'
df = pd.read_excel(path, *args, **kwargs)
req_cols = ['model', 'scenario', 'exclude']
if not set(req_cols).issubset(set(df.columns)):
e = 'File `{}` does not have required columns ({})!'
raise ValueError(e.format(path, req_cols))
# set index, filter to relevant scenarios from imported metadata file
df.set_index(META_IDX, inplace=True)
idx = self.meta.index.intersection(df.index)
n_invalid = len(df) - len(idx)
if n_invalid > 0:
msg = 'Ignoring {} scenario{} from imported metadata'
logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else ''))
if idx.empty:
raise ValueError('No valid scenarios in imported metadata file!')
df = df.loc[idx]
# Merge in imported metadata
msg = 'Importing metadata for {} scenario{} (for total of {})'
logger().info(msg.format(len(df), 's' if len(df) > 1 else '',
len(self.meta)))
for col in df.columns:
self._new_meta_column(col)
self.meta[col] = df[col].combine_first(self.meta[col])
# set column `exclude` to bool
self.meta.exclude = self.meta.exclude.astype('bool')
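# Sketch of a metadata file accepted by load_metadata (file name and the extra
# 'category' column are hypothetical; only the req_cols above are mandatory):
# >>> pd.read_excel('my_meta.xlsx', sheet_name='meta').columns.tolist()
# ['model', 'scenario', 'exclude', 'category']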
def line_plot(self, x='year', y='value', **kwargs):
"""Plot timeseries lines of existing data
see pyam.plotting.line_plot() for all available options
"""
df = self.as_pandas(with_metadata=True)
# pivot data if asked for explicit variable name
variables = df['variable'].unique()
if x in variables or y in variables:
keep_vars = set([x, y]) & set(variables)
df = df[df['variable'].isin(keep_vars)]
idx = list(set(df.columns) - set(['value']))
df = (df
.reset_index()
.set_index(idx)
.value # df -> series
.unstack(level='variable') # keep_vars are columns
.rename_axis(None, axis=1) # rm column index name
.reset_index()
.set_index(META_IDX)
)
if x != 'year' and y != 'year':
df = df.drop('year', axis=1) # years causes NaNs
ax, handles, labels = plotting.line_plot(df, x=x, y=y, **kwargs)
return ax
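# A hedged usage sketch of the pivot behaviour above (variable names are hypothetical):
# >>> df.line_plot(x='Price|Carbon', y='Emissions|CO2')
# both variables are unstacked into columns and one line is drawn per (model, scenario)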
def stack_plot(self, *args, **kwargs):
"""Plot timeseries stacks of existing data
see pyam.plotting.stack_plot() for all available options
"""
df = self.as_pandas(with_metadata=True)
ax = plotting.stack_plot(df, *args, **kwargs)
return ax
def bar_plot(self, *args, **kwargs):
"""Plot timeseries bars of existing data
see pyam.plotting.bar_plot() for all available options
"""
df = self.as_pandas(with_metadata=True)
ax = plotting.bar_plot(df, *args, **kwargs)
return ax
def pie_plot(self, *args, **kwargs):
"""Plot a pie chart
see pyam.plotting.pie_plot() for all available options
"""
df = self.as_pandas(with_metadata=True)
ax = plotting.pie_plot(df, *args, **kwargs)
return ax
def scatter(self, x, y, **kwargs):
"""Plot a scatter chart using metadata columns
see pyam.plotting.scatter() for all available options
"""
xisvar = x in self.data['variable'].unique()
yisvar = y in self.data['variable'].unique()
if not xisvar and not yisvar:
df = self.meta.reset_index()
elif xisvar and yisvar:
# filter pivot both and rename
dfx = (
self
.filter(variable=x)
.as_pandas()
.rename(columns={'value': x, 'unit': 'xunit'})
.set_index(YEAR_IDX)
.drop('variable', axis=1)
)
dfy = (
self
.filter(variable=y)
.as_pandas()
.rename(columns={'value': y, 'unit': 'yunit'})
.set_index(YEAR_IDX)
.drop('variable', axis=1)
)
df = dfx.join(dfy).reset_index()
else:
# filter, merge with meta, and rename value column to match var
var = x if xisvar else y
df = (
self
.filter(variable=var)
.as_pandas(with_metadata=True)
.rename(columns={'value': var})
)
ax = plotting.scatter(df, x, y, **kwargs)
return ax
def map_regions(self, map_col, agg=None, copy_col=None, fname=None,
region_col=None, remove_duplicates=False, inplace=False):
"""Plot regional data for a single model, scenario, variable, and year
see pyam.plotting.region_plot() for all available options
Parameters
----------
map_col: string
The column used to map new regions to. Common examples include
iso and 5_region.
agg: string, optional
Perform a data aggregation. Options include: sum.
copy_col: string, optional
Copy the existing region data into a new column for later use.
fname: string, optional
Use a non-default region mapping file
region_col: string, optional
Use a non-default column name for regions to map from.
remove_duplicates: bool, optional, default: False
If there are duplicates in the mapping from one regional level to
another, then remove these duplicates by counting the most common
mapped value.
This option is most useful when mapping from high resolution
(e.g., model regions) to low resolution (e.g., 5_region).
inplace : bool, default False
if True, do operation inplace and return None
"""
models = self.meta.index.get_level_values('model').unique()
fname = fname or run_control()['region_mapping']['default']
mapping = read_pandas(fname).rename(str.lower, axis='columns')
map_col = map_col.lower()
ret = copy.deepcopy(self) if not inplace else self
_df = ret.data
columns_orderd = _df.columns
# merge data
dfs = []
for model in models:
df = _df[_df['model'] == model]
_col = region_col or '{}.REGION'.format(model)
_map = mapping.rename(columns={_col.lower(): 'region'})
_map = _map[['region', map_col]].dropna().drop_duplicates()
_map = _map[_map['region'].isin(_df['region'])]
if remove_duplicates and _map['region'].duplicated().any():
# find duplicates
where_dup = _map['region'].duplicated(keep=False)
dups = _map[where_dup]
logger().warning("""
Duplicate entries found for the following regions.
Mapping will occur only for the most common instance.
{}""".format(dups['region'].unique()))
# get non duplicates
_map = _map[~where_dup]
# order duplicates by the count frequency
dups = (dups
.groupby(['region', map_col])
.size()
.reset_index(name='count')
.sort_values(by='count', ascending=False)
.drop('count', axis=1))
# take the top occurrence
dups = dups[~dups['region'].duplicated(keep='first')]
# combine them back
_map = pd.concat([_map, dups])
from decimal import Decimal, getcontext
from typing import Dict, Tuple
import numpy as np
import pandas as pd
import ftx.data._precisions as prec
getcontext().prec = 6
def decimal_with_precision(x: str, precision: int) -> Decimal:
return round(Decimal(x), precision)
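# Worked example of the quantisation helper above (input string is hypothetical):
# >>> decimal_with_precision('0.123456', 4)
# Decimal('0.1235')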
def convert_futures_size(futures_fills: pd.DataFrame) -> pd.DataFrame:
futures_fills.loc[:, 'size'] = futures_fills.loc[:, ['size']].astype('str')
for mkt in futures_fills['market'].unique():
mkt = mkt.split('-')[0]
# Will error if there is no entry in the Precisions Dict.
futures_fills.loc[futures_fills['market'].str.contains(mkt), 'size'] = futures_fills.loc[futures_fills['market'].str.contains(mkt), 'size'].apply(decimal_with_precision, args=(prec.FUTURES_SIZE[mkt], ))
return futures_fills
def convert_futures_price(futures_fills: pd.DataFrame) -> pd.DataFrame:
futures_fills.loc[:, 'price'] = futures_fills.loc[:, ['price']].astype('str')
for mkt in futures_fills['market'].unique():
mkt = mkt.split('-')[0]
futures_fills.loc[futures_fills['market'].str.contains(mkt), 'price'] = futures_fills.loc[futures_fills['market'].str.contains(mkt), 'price'].apply(decimal_with_precision, args=(prec.FUTURES_PRICE[mkt], ))
return futures_fills
def convert_futures_fee(futures_fills: pd.DataFrame) -> pd.DataFrame:
futures_fills.loc[:, 'fee'] = futures_fills.loc[:, ['fee']].astype('str')
for mkt in futures_fills['market'].unique():
mkt = mkt.split('-')[0]
futures_fills.loc[futures_fills['market'].str.contains(mkt), 'fee'] = futures_fills.loc[futures_fills['market'].str.contains(mkt), 'fee'].apply(decimal_with_precision, args=(prec.FUTURES_FEE, ))
return futures_fills
def convert_futures_feeRate(futures_fills: pd.DataFrame) -> pd.DataFrame:
futures_fills.loc[:, 'feeRate'] = futures_fills.loc[:, ['feeRate']].astype('str')
futures_fills.loc[:, 'feeRate'] = futures_fills.loc[:, 'feeRate'].apply(decimal_with_precision, args=(prec.FUTURES_FEERATE, ))
return futures_fills
def compute_deltas(fills: pd.DataFrame) -> pd.DataFrame:
fills['size'] = fills['size'].mask(fills['side'] == 'sell', -fills['size'])
fills['delta'] = fills['size'].cumsum()
return fills
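# A small worked example of the running-position logic above (fills are hypothetical):
# >>> fills = pd.DataFrame({'side': ['buy', 'sell', 'buy'], 'size': [2.0, 1.0, 0.5]})
# >>> compute_deltas(fills)['delta'].tolist()
# [2.0, 1.0, 1.5]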
def preprocess_fills(fills: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, Dict[str, pd.DataFrame], Dict[str, pd.DataFrame]]:
# Convert unique identifiers to string
fills['id'] = fills['id'].astype('str')
fills['orderId'] = fills['orderId'].astype('str')
fills['tradeId'] = fills['tradeId'].astype('str')
fills['time'] = pd.to_datetime(fills['time'])
# Sort fills by time and id
fills.sort_values(['time', 'id'], inplace=True, axis=0, ignore_index=True)
# Add volume column
fills['volume'] = fills['size'] * fills['price']
# Split spot and futures
spot = fills[(fills['future'].isnull()) & (fills['type'] != 'otc')].copy()
futures = fills[fills['future'].notnull()].copy()
# Drop unused columns in futures
futures.drop('baseCurrency', axis=1, inplace=True)
futures.drop('quoteCurrency', axis=1, inplace=True)
# Convert size to Decimal
futures = convert_futures_size(futures)
# Split futures by market
futures_by_market = {mkt: futures[futures['market'] == mkt].copy() for mkt in futures['market'].unique()}
spot_by_market = {mkt: spot[spot['market'] == mkt].copy() for mkt in spot['market'].unique()}
return spot, futures, futures_by_market, spot_by_market
def preprocess_funding(funding: pd.DataFrame) -> pd.DataFrame:
# Convert time
funding['time'] = pd.to_datetime(funding['time'])
import requests
import urllib3
import utilities
import pandas as pd
from bs4 import BeautifulSoup
# Disable Insecure Request Warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main():
# Make dataframe to hold links.
df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Author: <NAME>, <NAME>, 2017
"""
import numpy as np
import pandas as pd
import pastas as ps
class TestGXG(object):
def test_ghg(self):
idx = pd.to_datetime(['20160114', '20160115', '20160128', '20160214'])
s = pd.Series([10., 3., 30., 20.], index=idx)
v = ps.stats.ghg(s, min_n_meas=1, min_n_years=1)
assert v == 30.0
def test_ghg_ffill(self):
idx = pd.to_datetime(['20160101', '20160115', '20160130'])
s = pd.Series([0., 0., 10.], index=idx)
v = ps.stats.ghg(s, fill_method='ffill', limit=15, min_n_meas=1,
min_n_years=1)
assert v == 0.
def test_ghg_bfill(self):
idx = pd.to_datetime(['20160101', '20160115', '20160130'])
s = pd.Series([0., 0., 10.], index=idx)
v = ps.stats.ghg(s, fill_method='bfill', limit=15, min_n_meas=1,
min_n_years=1)
# TODO is this correct?
assert v == 10.
def test_ghg_linear(self):
idx = pd.to_datetime(['20160101', '20160110', '20160120', '20160130'])
s = pd.Series([0., 0., 10., 10.], index=idx)
v = ps.stats.ghg(s, fill_method='linear', min_n_meas=1,
min_n_years=1, limit=8)
# TODO is this correct?
assert v == 10.
def test_ghg_len_yearly(self):
idx = pd.date_range('20000101', '20550101', freq='d')
s = pd.Series(np.ones(len(idx)), index=idx)
v = ps.stats.ghg(s, output='yearly')
assert v.notna().sum() == 55
def test_glg(self):
idx = pd.date_range('20000101', '20550101', freq='d')
s = pd.Series([x.month + x.day for x in idx], index=idx, )
v = ps.stats.glg(s, year_offset='a')
assert v == 16.
def test_glg_fill_limit(self):
idx = pd.to_datetime(['20170115', '20170130', '20200101'])
s = pd.Series(np.ones(len(idx)), index=idx)
v = ps.stats.glg(s, fill_method='linear', limit=15,
output='yearly', year_offset='a', min_n_meas=1)
assert v.notna().sum() == 2
def test_glg_fill_limit_null(self):
idx = pd.to_datetime(['20170101', '20170131', '20200101'])
s = pd.Series(np.ones(len(idx)), index=idx)
v = ps.stats.glg(s, fill_method='linear', limit=None,
output='yearly', year_offset='a', min_n_meas=1)
assert v.notna().sum() == 3
def test_gvg(self):
idx = pd.to_datetime(['20170314', '20170328', '20170414', '20170428'])
import os
import h5py
from glob import glob
import numpy as np
from tqdm import tqdm
import pandas as pd
import os.path as osp
from datetime import datetime, timedelta
import sys
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
plt.switch_backend('agg')
'''
Pre-processing functions. Most of these were only run once, for example, to load data, modify it, and save it. The
functions are saved here for possible future use.
'''
hand_features = ['name','vs0', 'PSLV_v2', 'PSLV_v3', 'PSLV_v4', 'PSLV_v5', 'PSLV_v6', 'PSLV_v7',
'PSLV_v8', 'PSLV_v9', 'PSLV_v10', 'PSLV_v11', 'PSLV_v12', 'PSLV_v13',
'PSLV_v14', 'PSLV_v15', 'PSLV_v16', 'PSLV_v17', 'PSLV_v18', 'PSLV_v19',
'MTPW_v2', 'MTPW_v3', 'MTPW_v4', 'MTPW_v5', 'MTPW_v6', 'MTPW_v7',
'MTPW_v8', 'MTPW_v9', 'MTPW_v10', 'MTPW_v11', 'MTPW_v12', 'MTPW_v13',
'MTPW_v14', 'MTPW_v15', 'MTPW_v16', 'MTPW_v17', 'MTPW_v18', 'MTPW_v19',
'MTPW_v20', 'MTPW_v21', 'MTPW_v22', 'IR00_v2', 'IR00_v3', 'IR00_v4',
'IR00_v5', 'IR00_v6', 'IR00_v7', 'IR00_v8', 'IR00_v9', 'IR00_v10',
'IR00_v11', 'IR00_v12', 'IR00_v13', 'IR00_v14', 'IR00_v15', 'IR00_v16',
'IR00_v17', 'IR00_v18', 'IR00_v19', 'IR00_v20', 'IR00_v21', 'CSST_t24',
'CD20_t24', 'CD26_t24', 'COHC_t24', 'DTL_t24', 'RSST_t24', 'U200_t24',
'U20C_t24', 'V20C_t24', 'E000_t24', 'EPOS_t24', 'ENEG_t24', 'EPSS_t24',
'ENSS_t24', 'RHLO_t24', 'RHMD_t24', 'RHHI_t24', 'Z850_t24', 'D200_t24',
'REFC_t24', 'PEFC_t24', 'T000_t24', 'R000_t24', 'Z000_t24', 'TLAT_t24',
'TLON_t24', 'TWAC_t24', 'TWXC_t24', 'G150_t24', 'G200_t24', 'G250_t24',
'V000_t24', 'V850_t24', 'V500_t24', 'V300_t24', 'TGRD_t24', 'TADV_t24',
'PENC_t24', 'SHDC_t24', 'SDDC_t24', 'SHGC_t24', 'DIVC_t24', 'T150_t24',
'T200_t24', 'T250_t24', 'SHRD_t24', 'SHTD_t24', 'SHRS_t24', 'SHTS_t24',
'SHRG_t24', 'PENV_t24', 'VMPI_t24', 'VVAV_t24', 'VMFX_t24', 'VVAC_t24',
'HE07_t24', 'HE05_t24', 'O500_t24', 'O700_t24', 'CFLX_t24', 'DELV-12',
'dvs24']
def path2arr(path):
h5 = h5py.File(path, 'r')
return h5['matrix'].value
def get_nan_fracs(arr):
nan_fracs = []
for c in range(arr.shape[-1]):
nan_fracs.append(np.sum(np.isnan(arr[:, :, c])) / np.product(arr[:, :, c].shape))
return np.array(nan_fracs)
def clean_nans(arr):
'''
Input is a 2d array possibly containing nans
Output is same 2d array with nans replaced by nearest non-nan neighbor
'''
def get_neighbor_val(i,j):
d = 0
while True:
d += 1
for a in range(-d, d+1):
if i+a < 0: continue # don't wrap around with a negative row index
for b in range(-d, d+1):
if j+b < 0: continue # don't wrap around with a negative column index
if abs(a) != d and abs(b) != d: continue # only iterate over perimeter
try:
val = arr[i+a,j+b]
except IndexError:
continue
else:
if not np.isnan(val):
return val
arr_clean = np.copy(arr)
for i,j in np.argwhere(np.isnan(arr)):
arr_clean[i,j] = get_neighbor_val(i,j)
return arr_clean
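# A small worked example of the nearest-neighbour fill above (array is hypothetical):
# >>> a = np.array([[1.0, np.nan], [3.0, 4.0]])
# >>> clean_nans(a).tolist()
# [[1.0, 1.0], [3.0, 4.0]]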
def extract_images():
'''
Extract images from original big dataset and save separately.
Remove two middle channels.
Don't save images with a lot of nans.
If image has a small number of nans then replace with nearest neighbor values
'''
h5_filename = '/raid/data/hurricane/TCIR-ALL_2017.h5'
output_basepath = '/raid/data/hurricane/images_64_2017'
h5 = h5py.File(h5_filename, 'r')
df = pd.read_hdf(h5_filename, key="info", mode='r')
import logging
import pandas
import os
import numpy
from scipy import stats
from .. import Constants
from .. import Utilities
from .. import MatrixManager
from ..PredictionModel import WDBQF, WDBEQF, load_model, dataframe_from_weight_data
from ..misc import DataFrameStreamer
from . import AssociationCalculation
class SimpleContext(AssociationCalculation.Context):
def __init__(self, gwas, model, covariance):
self.gwas = gwas
self.model = model
self.covariance = covariance
def get_weights(self, gene):
w = self.model.weights
w = w[w.gene == gene]
return w
def get_covariance(self, gene, snps):
return self.covariance.get(gene, snps, strict_whitelist=False)
def get_n_in_covariance(self, gene):
return self.covariance.n_ids(gene)
def get_gwas(self, snps):
g = self.gwas
g = g[g[Constants.SNP].isin(snps)]
return g
def get_model_snps(self):
return set(self.model.weights.rsid)
def get_data_intersection(self):
return _data_intersection(self.model, self.gwas)
def provide_calculation(self, gene):
w = self.get_weights(gene)
gwas = self.get_gwas(w[WDBQF.K_RSID].values)
i = pandas.merge(w, gwas, left_on="rsid", right_on="snp")
if not Constants.BETA in i: i[Constants.BETA] = None
i = i[[Constants.SNP, WDBQF.K_WEIGHT, Constants.ZSCORE, Constants.BETA]]
snps, cov = self.get_covariance(gene, i[Constants.SNP].values)
# fast subsetting and aligning
d_columns = i.columns.values
if snps is not None and len(snps):
d = {x[0]: x for x in i.values}
d = [d[snp] for snp in snps]
d = list(zip(*d))
d = {d_columns[i]:d[i] for i in range(0, len(d_columns))}
i = pandas.DataFrame(d)
else:
i = pandas.DataFrame(columns=d_columns)
return len(w.weight), i, cov, snps
def get_model_info(self):
return self.model.extra
class OptimizedContext(SimpleContext):
def __init__(self, gwas, model, covariance, MAX_R):
self.covariance = covariance
self.genes, self.weight_data, self.snps_in_model = _prepare_weight_data(model, MAX_R)
self.gwas_data = _prepare_gwas_data(gwas)
self.extra = model.extra
self.last_gene = None
self.data_cache = None
self.pedantic = MAX_R is None
def _get_weights(self, gene):
w = self.weight_data[gene]
w = {x[WDBQF.RSID]:x[WDBQF.WEIGHT] for x in w}
return w
def get_weights(self, gene):
w = self.weight_data[gene]
w = dataframe_from_weight_data(list(zip(*w)))
return w
def get_model_snps(self):
return set(self.snps_in_model)
def _get_gwas(self, snps):
snps = set(snps)
g = self.gwas_data
g = [g[x] for x in snps if x in g]
g = {x[0]:(x[1], x[2]) for x in g}
return g
def get_gwas(self, snps):
snps = set(snps)
g = self.gwas_data
g = [g[x] for x in snps if x in g]
if len(g):
g = list(zip(*g))
g = pandas.DataFrame({Constants.SNP:g[0], Constants.ZSCORE:g[1], Constants.BETA:g[2]})
else:
g = pandas.DataFrame(columns=[Constants.SNP, Constants.ZSCORE, Constants.BETA])
return g
def get_data_intersection(self):
return _data_intersection_3(self.weight_data, self.gwas_data, self.extra.gene.values, self.pedantic)
def provide_calculation(self, gene):
if gene != self.last_gene:
#dummy while(True) to emulate a goto
while True:
w = self._get_weights(gene)
gwas = self._get_gwas(list(w.keys()))
type = [numpy.str, numpy.float64, numpy.float64, numpy.float64]
columns = [Constants.SNP, WDBQF.K_WEIGHT, Constants.ZSCORE, Constants.BETA]
d = {x: v for x, v in w.items() if x in gwas}
snps, cov = self.get_covariance(gene, list(d.keys()))
if snps is None:
d = pandas.DataFrame(columns=columns)
self.data_cache = len(w), d, cov, snps
self.last_gene = gene
break
d = [(x, w[x], gwas[x][0], gwas[x][1]) for x in snps]
d = list(zip(*d))
if len(d):
d = {columns[i]:numpy.array(d[i], dtype=type[i]) for i in range(0,len(columns))}
else:
d = {columns[i]:numpy.array([]) for i in range(0,len(columns))}
self.data_cache = len(w), d, cov, snps
self.last_gene = gene
break
return self.data_cache
def get_model_info(self):
return self.extra
def _data_intersection(model, gwas):
weights = model.weights
k = pandas.merge(weights, gwas, how='inner', left_on="rsid", right_on="snp")
import glob
import os
import sys
# these path inserts and imports need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date, which should have the same date as the
# NNDSS/linelist data; use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 21 03:31:16 2018
@author: burhanusman
"""
#Importing the required Packages
import os
os.chdir('/Users/burhanusman/Documents/Competitions/Data_supremacy')
import lightgbm as lgb
import pandas as pd
from catboost import CatBoostClassifier
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
#Function for label encoding the columns of a dataframe
def label_encode_df(dataframe,cols):
for col in cols:
le=LabelEncoder()
dataframe[col]=le.fit_transform(dataframe[col].astype(str))
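#A minimal worked example of the label encoding above (toy frame, hypothetical values):
# >>> df = pd.DataFrame({'city': ['x', 'y', 'x']})
# >>> label_encode_df(df, ['city']); df['city'].tolist()
# [0, 1, 0]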
#Function for target encoding categorical variables
#df_fit is the dataframe from which the target encoding values are computed
#df_transform is the dataframe into which the target encoded variables are merged
def target_encode_mean(df_fit,df_transform,col,target):
group_mean=pd.DataFrame(df_fit.groupby([col])[target].mean())
group_mean.columns=[col+"_"+target+"_mean"]
group_mean.reset_index(inplace=True)
df_transform=df_transform.merge(group_mean,how="left",on=[col])
return df_transform
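#A small worked example of the mean target encoding above (toy frame, hypothetical values):
# >>> df = pd.DataFrame({'city': ['a', 'a', 'b'], 'target': [1, 0, 1]})
# >>> target_encode_mean(df, df, 'city', 'target')['city_target_mean'].tolist()
# [0.5, 0.5, 1.0]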
#Reading the train data
train = pd.read_csv("train.csv")
#Creating folds for out-of-fold prediction stacking
train["fold"]=0
i=1
for tr,ts in KFold(n_splits=5,shuffle=True,random_state=5).split(train):
train.loc[list(ts),"fold"]=i
i=i+1
#Reading the test data
test = pd.read_csv("test.csv")
#Stacking dataframe to hold the predicted outputs
stack=pd.DataFrame()
stack["enrollee_id"]=train.enrollee_id
stack["fold"]=train.fold
stack["target"]=train.target
#Defining catboost models to be stacked (only one model was used in the final submission)
model1={"model_name" : "CatBoost1", "n_estimators":540,"model_vars" :['city', 'gender',
'relevent_experience', 'enrolled_university', 'education_level',
'major_discipline', 'experience', 'company_size', 'company_type',
'last_new_job', 'training_hours','NA_type'],"cat_vars" :12, "seed" :30}
models=[model1]
#Loop for iteratively training on 4 folds and predicting on the 5th
#We obtain a dataframe where no. of columns = number of models used, and no. of rows = rows in train data
for model in models:
stack[model["model_name"]]=0
comb=pd.concat([train,test])
comb.reset_index(inplace=True,drop=True)
NA_cols=["company_size","company_type","education_level","enrolled_university","experience","gender",
"last_new_job","major_discipline"]
for col in NA_cols:
comb["isna_"+col]=comb[col].isna().astype(int)
comb["NA_type"]=''
for col in NA_cols:
comb["NA_type"]=comb["NA_type"].astype(str)+"_"+comb[col].isna().astype(int).astype(str)
label_encode_df(comb,model["model_vars"][0:model["cat_vars"]])
for col in model["model_vars"][0:model["cat_vars"]]:
comb[col]=comb[col].astype(str)
for i in range(1,6):
print("Running Model " +model["model_name"]+" for fold "+str(i))
comb["dataset"]="train"
len_train=18359
comb.loc[len_train:,"dataset"]="test"
comb.loc[comb.fold==i,"dataset"]="valid"
y=comb.loc[comb.dataset=="train","target"].values
y_test=comb.loc[comb.dataset=="valid","target"].values
x=comb.loc[comb.dataset=="train",model["model_vars"]].values
x_test=comb.loc[comb.dataset=="valid",model["model_vars"]].values
cat_model=CatBoostClassifier(eval_metric="AUC",n_estimators=model["n_estimators"],random_state=model["seed"])
cat_model.fit(x,y,cat_features=list(range(0,model["cat_vars"])),verbose=False)
stack.loc[stack.fold==i,model["model_name"]]=cat_model.predict_proba(comb.loc[comb.dataset=="valid",model["model_vars"]].values)[:,1]
#Training the above models on the full train dataset and predicting for the test data
#We obtain a dataframe where no. of columns = number of models used, and no. of rows = rows in test data
stack_test=pd.DataFrame()
stack_test["enrollee_id"]=test.enrollee_id
for model in models:
stack_test[model["model_name"]]=0
comb = pd.concat([train, test])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib import cm
import pandas as pd
from dataset.min_max_scaling_operation import MinMaxScaling
from dataset.biometric_dataset import BioDataSet
from classifiers.random_forest_classifier import RandomForestClassifier
from classifiers.svm_classifier import SvmClassifier
from classifiers.knn_classifier import KnnClassifier
from metrics.confusion_matrix import ConfusionMatrix
from metrics.roc_curve import RocCurve
from metrics.gini_coefficient import GiniCoef
from metrics.fcs import FCS
from analytics.dataoverlap_interval import OverLapInt
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from synth_data_gen.gauss_blob_generator import GaussBlob
from pathlib import Path
import os
import math
"""
Data and metrics saving path
"""
root_path = Path(__file__).parent.parent.parent.parent
data_metric_save_path = os.path.join(root_path, 'experiment_results\\neg_class_contained_center_edge2_03D_02U\\')
"""
Generating isometric Gaussian data; in this example the data is generated with three features
"""
rand_state_1 = 42
features = 3
users = 1
neg_users = 1
samples_pos = 1000
outlier_filter_x_std = 2
samples_neg = math.ceil(samples_pos / neg_users)
center_pos_class = [np.zeros(features)]
center_neg_class_1 = np.ones(features) * 1.5
# center_neg_class_2 = np.ones(features) * -1
# center_neg_class_3 = np.ones(features) * 0.9
# center_neg_class_4 = np.ones(features) * -12
# center_neg_class_5 = np.ones(features) * 0.7
# center_neg_class_6 = np.ones(features) * -2
# center_neg_class_7 = np.ones(features) * -0.8
# center_neg_class_8 = np.ones(features) * 0.9
# center_neg_class_9 = np.ones(features) * 1
centers_neg_class = []
centers_neg_class.extend((value for name, value in globals().items() if name.startswith('center_neg_class_')))
labels_pos = ['pos_class']
labels_neg = ['neg_class_u%02d' % i for i in range(neg_users)]
std_dev_pos = [1]
# std_dev_neg = [1.75, 1.5, 1, 1, 1, 1, 1, 1, 2]
std_dev_neg = [1]
std_dev_neg_df = pd.DataFrame(labels_neg)
from string import Template
import pandas as pd
import numpy as np
import utils.log_util as logger
class TransformationUtil:
too_few_distinct_values_message = Template("INFO: Dropping column $col " + \
"because it doesn't have at least two distinct values.")
too_few_samples_message = Template("INFO: Dropping column $col " + \
"because it doesn't have at least $min_num_samples samples for 0 and for 1.")
converting_message = Template("INFO: Converting binary column $col " + \
"to 0/1 encoding.")
expanding_message = Template("INFO: Expanding categoric column $col " + \
"as indicator variables.")
@staticmethod
def encode_as_binary(input_df, min_num_samples):
"""
Converts each column of the input dataframe to binary encoding. Intended
for use with dataframes whose columns are binary or categorical, but it
will not throw an exception if given numeric or free-text data; such columns
will usually be excluded from the output due to the `min_num_samples`
filtering.
For each column, let `num_distinct_values` be the number of distinct values,
excluding NA, in the column.
- If `num_distinct_values` < 2, drop the column from the output.
- If `num_distinct_values` == 2 and the two distinct values are 0 and 1,
leave the column unchanged in the output.
- If `num_distinct_values` == 2 and the two distinct values are not 0 and 1,
convert the column for the output as follows: Replace all instances
of one of the distinct values with 0 and replace all instances of the
other distinct value with 1. Any missing values will remain unchanged.
Edit the column name to indicate which of the original values is now
represented by 1.
- If `num_distinct_values` > 2, expand the column into `num_distinct_values`
indicator columns in the output; any NAs will be preserved
Finally, for each of the binary columns present at the end of the above
procedure, count the number of samples having value 0 and the number of
samples having value 1. If either of those counts is less than `min_num_samples`,
drop the column from the output.
Args:
input_df (pandas.DataFrame): the dataframe to process.
min_num_samples (int): the minimum number of samples that must have value 0
and that must have value 1 in each of the output columns.
Returns:
pandas.DataFrame: a new dataframe derived from the input as described.
"""
# create an empty df for output
output_df = pd.DataFrame({}, index=input_df.index)
# encode all columns as binary
for col in input_df:
s_col_values = input_df[col]
# determine number of distinct values for this column
s_value_counts = s_col_values.value_counts()
# first ensure 0/1 encoding, including indicator variables for categorical case
if len(s_value_counts) < 2:
logger.logging.append(\
TransformationUtil.too_few_distinct_values_message.substitute(col=col))
elif len(s_value_counts) == 2:
if sorted(s_value_counts.index.values) == [0, 1]:
# column is already 0/1 encoded; add to output df
output_df = pd.concat([output_df, s_col_values], axis=1)
else:
logger.logging.append(\
TransformationUtil.converting_message.substitute(col=col))
# convert to 0/1 encoding and then add to output df
output_col = pd.get_dummies(s_col_values, prefix=col, \
drop_first=True) # note drop_first=True to get 1 col back
# preserve NAs
output_col.loc[s_col_values.isnull(), :] = np.nan
output_df = pd.concat([output_df, output_col], axis=1)
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import cv2
from utils.geometry import line_polar_to_cart
##################### Network #####################
def plotImages(x, y, img_n, WAYP_VALUE = 255):
"""
Take a batch from the generator and plot a number of images equal to img_n.
The number of columns defaults to max_c. The batch must contain at least two inputs.
"""
max_c = 5
if img_n <= max_c:
r = 1
c = img_n
else:
r = math.ceil(img_n/max_c)
c = max_c
fig, axes = plt.subplots(r, c, figsize=(15,15))
axes = axes.flatten()
for x, y, ax in zip(x, y, axes):
canvas = x.copy()[...,None].astype('uint8')
canvas = np.concatenate((canvas,canvas,canvas), axis=-1)
row, col = np.where(y==WAYP_VALUE)
for r, c in zip(row, col):
canvas = cv2.circle(canvas, (c,r), 3, (50,255,250), -1)
ax.imshow(cv2.bitwise_not(canvas))
ax.grid()
plt.tight_layout()
plt.show()
def plotHistory(history):
"""
Plot the loss and accuracy curves for training and validation
"""
pd.DataFrame(history.history)
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
from requests import get
import re
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import datetime
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
geolocator = Nominatim(user_agent='myuseragent')
import lxml
import plotly.express as px
from PIL import Image
#with open("styles/style.css") as f:
# st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(
page_title="O/U Hockey Analytics",
page_icon=":ice_hockey_stick_and_puck:"
)
#Dummy data to get the header to display correctly
st.markdown("""<Head>
<Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>""",unsafe_allow_html=True)
#Title/Header
st.markdown("""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:
-webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:
text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>""",unsafe_allow_html=True)
# Load data
data_load_state = st.text('Checking and Fetching Data...')
#####################################
#### Data Gathering and Cleaning ####
#####################################
master_df = pd.read_csv('master_df.csv')
master_df = master_df.dropna(thresh=10)
start = pd.to_datetime(master_df.Date[-1:]).dt.date.values[0]+datetime.timedelta(days=1)
today = datetime.date.today()
yesterday = today-datetime.timedelta(days = 1)
#Function to convert a date range to a list of integer date keys (YYYYMMDD)
def covert_dates(date1, date2):
covert_list = []
days = pd.date_range(date1, date2, freq='d')
for i in range(len(days)):
covert_list.append(int(days[i].strftime('%Y%m%d')))
return covert_list
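#Example of the integer date keys produced above (dates are hypothetical):
# >>> covert_dates(datetime.date(2022, 1, 1), datetime.date(2022, 1, 3))
# [20220101, 20220102, 20220103]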
#Function to fetch missing data
@st.cache
def get_data(date1, date2):
new_df = pd.DataFrame()
for day in covert_dates(date1, date2):
site = f"https://sportsdatabase.com/nhl/query?output=default&sdql=date%3D{day}&submit=++S+D+Q+L+%21++"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page)
tables = soup.find('table', attrs={'id':'DT_Table'})
page_df = pd.read_html(str(tables))[0]
new_df = pd.concat([new_df, page_df])
time.sleep(1)
return new_df
#Check if the data needs updating
if start <= today:
new_data = get_data(start, today)
master_df = pd.concat([master_df, new_data])
#Save updated data as csv
#master_df.to_csv("master_df.csv", index=False)
def clean_data(df):
df.Date =pd.to_datetime(df.Date)
df= df.sort_values(by=['Team', 'Date']).reset_index()
df.insert(2, "Date_Prev", df.Date.shift(1))
df.insert(2, "Days_Rest", (df.Date_Prev-df.Date)*-1)
df = df.drop(['index','Season', 'P1', 'P2', 'P3'], axis=1)
return df
#Fucntion to identify a team change to break streak counts
def trips(home_or_away, TeamChange, Site):
list =[]
x = 0
for i, j in zip(TeamChange, Site):
if i == False:
x = x
else:
x = 0
if j == home_or_away:
x += 1
else:
x = 0
list.append(x)
return list
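#A small worked example of the streak counter above (schedule is hypothetical):
# >>> trips('home', [True, False, False, False], ['home', 'home', 'away', 'home'])
# [1, 2, 0, 1]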
#Function to calculate the distance the road team is from home
def distance_calc(df):
df.insert(4,"Team_City", df.Team.map(team_dict['City']))
df.insert(6,"Opp_City", df.Opp.map(team_dict['City']))
df.insert(9,"Team_point", df.Team.map(team_dict['Citypoint']))
df.insert(10,"Opp_point", df.Opp.map(team_dict['Citypoint']))
df['Distance'] = df.apply(lambda x: geodesic(x['Team_point'],x['Opp_point']).km, axis=1)
df['Team_distance'] = df.apply(lambda x: 0 if x.Site == "home" else x.Distance, axis=1)
df['Opp_distance'] = df.apply(lambda x: 0 if x.Site == "away" else x.Distance, axis=1)
df = df.drop(['Team_point','Distance','Opp_point'], axis=1)
return df
#Function to count the current streak of home or away games
def road_trips(df):
df.insert(4, "TeamChange", df["Team"].shift(1, fill_value=df["Team"].head(1)) != df["Team"])
df.insert(10, "Home_Stand", trips("home", df.TeamChange, df.Site))
df.insert(11, "Road_Trip", trips("away", df.TeamChange, df.Site))
df.Days_Rest = df.Days_Rest.dt.days
df.Days_Rest = df.Days_Rest.fillna(5)
df.Days_Rest = df.Days_Rest.astype(int)-1
df.loc[df.Days_Rest < 0, 'Days_Rest'] = 5
df = df.drop('TeamChange', axis=1)
return df
#Function to pair games into a single record -- for O/U analysis
def opp_func (df):
df.insert(2,"Opp_Days_Rest", eda_df.Oppkey.map(opp_days_rest))
df.insert(10,"Opp_home_stand", eda_df.Oppkey.map(opp_home_stand))
df.insert(11,"Opp_road_trip", eda_df.Oppkey.map(opp_road_trip))
return df
#Function to calculate the unit return of each game and team
def unit_value(Line, Result):
if Line < 0 and Result == 'W':
return 1
elif Line < 0 and Result == 'L':
return Line/100
elif Line > 0 and Result == 'W':
return Line/100
elif Line > 0 and Result == 'L':
return -1
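#Worked examples of the unit-return convention above (American odds; lines are hypothetical):
# >>> unit_value(-150, 'W'), unit_value(-150, 'L'), unit_value(120, 'W'), unit_value(120, 'L')
# (1, -1.5, 1.2, -1)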
nhltable= pd.read_csv('nhltable.csv')
team_dict = nhltable.set_index('Team').to_dict()
eda_df = clean_data(master_df)
eda_df = distance_calc(eda_df)
eda_df = road_trips(eda_df)
#Adding Division
eda_df = pd.merge(eda_df, nhltable[['Team', 'Division']], on='Team', how="left" )
#Create keys for pairing
Teamkey = []
Oppkey = []
for i in range(len(eda_df.Date)):
Teamkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Team[i])
Oppkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Opp[i])
eda_df['Oppkey'] = Oppkey
opp_days_rest = dict(zip(Teamkey, eda_df.Days_Rest))
opp_home_stand = dict(zip(Teamkey, eda_df.Home_Stand))
opp_road_trip = dict(zip(Teamkey, eda_df.Road_Trip))
opp_func(eda_df)
eda_df.Final = eda_df.Final.fillna('0-0')
eda_df = eda_df.fillna(0)
eda_df = pd.concat([eda_df, pd.get_dummies(eda_df.OUr)], axis=1)
goals_df = eda_df['Final'].str.split('-', expand=True).rename(columns={0:'Team_Goals', 1:'Opp_Goals'}).astype(int)
eda_df = pd.concat([eda_df, goals_df], axis=1)
eda_df['total_O'] = eda_df.groupby('Team')['O'].cumsum()
eda_df['total_U'] = eda_df.groupby('Team')['U'].cumsum()
eda_df['total_P'] = eda_df.groupby('Team')['P'].cumsum()
eda_df['total_Team_goals'] = eda_df.groupby('Team')['Team_Goals'].cumsum()
eda_df['total_Opp_goals'] = eda_df.groupby('Team')['Opp_Goals'].cumsum()
#eda_df = eda_df.loc[eda_df['OUr']!='P']
#eda_df['y'] = (eda_df.OUr=='O').astype(int)
eda_df['Team_U'] = eda_df.groupby('Team')['total_U'].transform('max')
eda_df['Team_O'] = eda_df.groupby('Team')['total_O'].transform('max')
eda_df['Opp_U'] = eda_df.groupby('Opp')['total_U'].transform('max')
eda_df['Opp_O'] = eda_df.groupby('Opp')['total_O'].transform('max')
eda_df['Team_Goals_Scored'] = eda_df.groupby('Team')['total_Team_goals'].transform('max')
eda_df['Team_Goals_Allowed'] = eda_df.groupby('Team')['total_Opp_goals'].transform('max')
eda_df['Opp_Goals_Scored'] = eda_df.groupby('Opp')['total_Team_goals'].transform('max')
eda_df['Opp_Goals_Allowed'] = eda_df.groupby('Opp')['total_Opp_goals'].transform('max')
#eda_df['Units'] = eda_df.apply(lambda x: unit_value(x.Line, x.SUr), axis=1)
#Tonight's games data
today_np = np.datetime64(today)
tonight_df= eda_df[['Team','Opp','Total','Home_Stand','Opp_road_trip','Days_Rest','Opp_Days_Rest', 'Opp_distance', 'Team_U',
'Opp_U','Team_O', 'Opp_O','Team_Goals_Scored', 'Opp_Goals_Scored','Team_Goals_Allowed', 'Opp_Goals_Allowed', "Date",'Site']]
tonight_df = tonight_df.loc[(tonight_df['Date']==today_np) & (tonight_df['Site']=='home')].reset_index(drop=True)
#Separating the two EDA dataframes
eda_OU = eda_df.loc[(eda_df['Site']=='home') & (eda_df['Date']<today_np)]
eda_OU.insert(3, "Combined_Rest", eda_OU.loc[:,'Days_Rest'] + eda_OU.loc[:,'Opp_Days_Rest'])
cut_labels = [500, 1000, 1500, 2000, 3000, 4000]
cut_bins = [0, 500, 1000, 1500, 2000, 3000, 4000]
eda_OU['Distance'] = pd.cut(eda_OU.loc[:,'Opp_distance'], bins=cut_bins, labels=cut_labels)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import json
import math
from ConfigReader import configuration
import mysql.connector
from common import constants as constant
from mysql.connector import errorcode
from datetime import datetime
import pandas as pd
import jaydebeapi
class source(object):
def __init__(self):
logging.debug("Initiating schemaReader.source()")
def removeNewLine(self, _data):
if _data == None:
return None
else:
return _data
def readTableColumns(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableColumns()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = "select "
query += " SchemaName = CAST((TBL.TABLE_SCHEMA) AS NVARCHAR(4000)), "
query += " TableName = CAST((TBL.TABLE_NAME) AS NVARCHAR(4000)), "
query += " TableDescription = CAST((tableProp.value) AS NVARCHAR(4000)), "
query += " ColumnName = CAST((COL.COLUMN_NAME) AS NVARCHAR(4000)), "
query += " ColumnDataType = CAST((COL.DATA_TYPE) AS NVARCHAR(4000)), "
query += " ColumnLength = COL.CHARACTER_MAXIMUM_LENGTH, "
query += " ColumnDescription = CAST((colDesc.ColumnDescription) AS NVARCHAR(4000)), "
query += " ColumnPrecision = CAST((COL.numeric_precision) AS NVARCHAR(128)), "
query += " ColumnScale = COL.numeric_scale, "
query += " IsNullable = CAST((COL.Is_Nullable) AS NVARCHAR(128)), "
query += " TableType = CAST((TBL.TABLE_TYPE) AS NVARCHAR(4000)), "
query += " CreateDate = sysTables.create_date "
query += "FROM INFORMATION_SCHEMA.TABLES TBL "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS COL "
query += " ON COL.TABLE_NAME = TBL.TABLE_NAME "
query += " AND COL.TABLE_SCHEMA = TBL.TABLE_SCHEMA "
query += "LEFT JOIN sys.tables sysTables "
query += " ON sysTables.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += "LEFT JOIN sys.extended_properties tableProp "
query += " ON tableProp.major_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND tableProp.minor_id = 0 "
query += " AND tableProp.name = 'MS_Description' "
query += "LEFT JOIN ( "
query += " SELECT "
query += " sc.object_id, "
query += " sc.column_id, "
query += " sc.name, "
query += " colProp.[value] AS ColumnDescription "
query += " FROM sys.columns sc "
query += " INNER JOIN sys.extended_properties colProp "
query += " ON colProp.major_id = sc.object_id "
query += " AND colProp.minor_id = sc.column_id "
query += " AND colProp.name = 'MS_Description' "
query += " ) colDesc "
query += " ON colDesc.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND colDesc.name = COL.COLUMN_NAME "
query += "WHERE lower(TBL.TABLE_TYPE) in ('base table','view') "
query += " AND COL.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND COL.TABLE_NAME = '%s' "%(table)
query += "ORDER BY TBL.TABLE_SCHEMA, TBL.TABLE_NAME,COL.ordinal_position"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] in ("numeric", "decimal"):
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
elif row[4] in ("geometry", "image", "ntext", "text", "xml"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
elif row[4] == "varbinary":
if row[7] != None and row[7] > -1:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[9]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
# First determine if column ORIGIN_CON_ID exists in ALL_TAB_COMMENTS. If it does, we need to take that into consideration
oracle_OriginConId_exists = True
oracle_OriginConId = None
# query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS WHERE 1 = 0"
query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS "
query += "WHERE OWNER = '%s' "%(schema)
if table != None:
query += " AND TABLE_NAME = '%s' "%(table)
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
if "invalid identifier" in str(errMsg):
oracle_OriginConId_exists = False
else:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
if oracle_OriginConId_exists == True:
rowCount = 0
for row in JDBCCursor.fetchall():
oracle_OriginConId = row[0]
rowCount += 1
if rowCount != 1:
# If there are more than one originConId, it's impossible to determine what we will use. So then we go to default
oracle_OriginConId = None
query = "SELECT "
query += " ALL_TAB_COLUMNS.OWNER SCHEMA_NAME, "
query += " ALL_TAB_COLUMNS.TABLE_NAME, "
query += " ALL_TAB_COMMENTS.COMMENTS TABLE_COMMENT, "
query += " ALL_TAB_COLUMNS.COLUMN_NAME, "
query += " ALL_TAB_COLUMNS.DATA_TYPE, "
query += " ALL_TAB_COLUMNS.DATA_LENGTH, "
query += " ALL_COL_COMMENTS.COMMENTS COLUMN_COMMENT, "
query += " ALL_TAB_COLUMNS.CHAR_LENGTH, "
query += " ALL_TAB_COLUMNS.DATA_PRECISION, "
query += " ALL_TAB_COLUMNS.DATA_SCALE, "
query += " ALL_TAB_COLUMNS.NULLABLE, "
query += " ALL_OBJECTS.OBJECT_TYPE, "
query += " ALL_OBJECTS.CREATED "
query += "FROM ALL_TAB_COLUMNS ALL_TAB_COLUMNS "
query += "LEFT JOIN ALL_TAB_COMMENTS ALL_TAB_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_TAB_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_TAB_COMMENTS.TABLE_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_COL_COMMENTS ALL_COL_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_COL_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_COL_COMMENTS.TABLE_NAME "
query += " AND ALL_TAB_COLUMNS.COLUMN_NAME = ALL_COL_COMMENTS.COLUMN_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_OBJECTS ALL_OBJECTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_OBJECTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_OBJECTS.OBJECT_NAME "
query += " AND ALL_OBJECTS.OBJECT_TYPE IN ('TABLE', 'VIEW') "
query += "WHERE ALL_TAB_COLUMNS.OWNER = '%s' "%(schema)
if table != None:
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, ALL_TAB_COLUMNS.TABLE_NAME, ALL_TAB_COLUMNS.COLUMN_ID"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if re.search('TIMESTAMP', row[4]) or row[4] in ("CLOB", "DATE", "LONG", "BLOB", "NCLOB", "LONG RAW"):
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[4] in ("VARCHAR", "VARCHAR2", "CHAR", "NCHAR", "NVARCHAR2"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[4] in ("NUMBER", "FLOAT", "BINARY_FLOAT", "BINARY_DOUBLE"):
if row[8] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[8] == 0: #("DATA_PRECISION") == 0) then use char_length
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[9]== None or row[9] == 0:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[8]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], int(row[8]), int(row[9]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[5]))
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[10]
line_dict["TABLE_TYPE"] = row[11]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[12], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "select "
query += " c.table_schema as table_schema, "
query += " c.table_name, "
query += " t.table_comment, "
query += " c.column_name, "
query += " c.data_type, "
query += " c.character_maximum_length, "
query += " c.column_comment, "
query += " c.is_nullable, "
query += " c.numeric_precision, "
query += " c.numeric_scale, "
query += " t.table_type, "
query += " t.create_time "
query += "from information_schema.columns c "
query += "left join information_schema.tables t "
query += " on c.table_schema = t.table_schema and c.table_name = t.table_name "
query += "where c.table_schema = '%s' "%(database)
if table != None:
query += " and c.table_name = '%s' "%(table)
query += "order by c.table_schema,c.table_name, c.ordinal_position "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "decimal":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(self.removeNewLine(row[4]), row[8], row[9])
elif row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == None or row[6] == "":
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[7]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "SELECT "
query += " TRIM(ST.CREATOR) as SCHEMA_NAME, "
query += " TRIM(ST.NAME) as TABLE_NAME, "
query += " TRIM(ST.REMARKS) as TABLE_COMMENT, "
query += " TRIM(SC.NAME) as SOURCE_COLUMN_NAME, "
query += " TRIM(SC.COLTYPE) SOURCE_COLUMN_TYPE, "
query += " SC.LENGTH as SOURCE_COLUMN_LENGTH, "
query += " SC.SCALE as SOURCE_COLUMN_SCALE, "
query += " TRIM(SC.REMARKS) as SOURCE_COLUMN_COMMENT, "
query += " SC.NULLS as IS_NULLABLE, "
query += " ST.TYPE as TABLE_TYPE, "
query += " ST.CTIME as CREATE_TIME "
query += "FROM SYSIBM.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON ST.NAME = SC.TBNAME "
query += " AND ST.CREATOR = SC.TBCREATOR "
query += "WHERE "
query += " ST.CREATOR = '%s' "%(schema)
if table != None:
query += " AND ST.NAME = '%s' "%(table)
query += "ORDER BY ST.CREATOR, ST.NAME"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[7] == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
query = "SELECT "
query += " TRIM(ST.TABLE_SCHEMA) as SCHEMA_NAME, "
query += " TRIM(ST.TABLE_NAME) as TABLE_NAME, "
query += " ST.LONG_COMMENT as TABLE_COMMENT, "
query += " TRIM(SC.COLUMN_NAME) as SOURCE_COLUMN_NAME, "
query += " SC.TYPE_NAME as SOURCE_COLUMN_TYPE, "
query += " SC.COLUMN_SIZE as SOURCE_COLUMN_LENGTH, "
query += " SC.DECIMAL_DIGITS as SOURCE_COLUMN_SCALE, "
query += " SC.REMARKS as SOURCE_COLUMN_COMMENT, "
query += " SC.IS_NULLABLE, "
query += " ST.TABLE_TYPE, "
# ST.LAST_ALTERED_TIMESTAMP is not really correct, but it's the best we got
# https://www.ibm.com/support/knowledgecenter/SSAE4W_9.6.0/db2/rbafzcatsystbls.htm
query += " ST.LAST_ALTERED_TIMESTAMP "
query += "FROM QSYS2.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON ST.TABLE_SCHEMA = SC.TABLE_SCHEM "
query += " AND ST.TABLE_NAME= SC.TABLE_NAME "
query += "WHERE "
query += " ST.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND SC.TABLE_NAME = '%s' "%(table)
query += "ORDER BY ST.TABLE_SCHEMA, SC.TABLE_NAME, SC.ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if self.removeNewLine(row[7]) == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.POSTGRESQL:
query = "SELECT "
query += " tab_columns.table_schema, "
query += " tab_columns.table_name, "
query += " pg_catalog.col_description(c.oid, 0::int) as table_comment, "
query += " tab_columns.column_name, "
query += " data_type, "
query += " character_maximum_length, "
query += " pg_catalog.col_description(c.oid, tab_columns.ordinal_position::int) as column_comment, "
query += " is_nullable, "
query += " tab_tables.table_type "
query += "FROM information_schema.columns AS tab_columns "
query += "LEFT JOIN pg_catalog.pg_class c "
query += " ON c.relname = tab_columns.table_name "
query += "LEFT JOIN information_schema.tables AS tab_tables "
query += " ON tab_tables.table_catalog = tab_columns.table_catalog "
query += " AND tab_tables.table_schema = tab_columns.table_schema "
query += " AND tab_tables.table_name = tab_columns.table_name "
query += "WHERE tab_columns.table_catalog = '%s' "%(database)
query += " AND tab_columns.table_schema ='%s' "%(schema)
if table != None:
query += " AND tab_columns.table_name = '%s' "%(table)
query += "ORDER BY table_schema, table_name"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[7]
line_dict["TABLE_TYPE"] = row[8]
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.PROGRESS:
query = "SELECT "
query += " tab_tables.OWNER, "
query += " tab_tables.TBL, "
query += " tab_tables.DESCRIPTION AS TBL_Commnets, "
query += " COL, "
query += " COLTYPE, "
query += " WIDTH, "
query += " SCALE, "
query += " tab_columns.DESCRIPTION, "
query += " tab_columns.NULLFLAG, "
query += " tab_tables.TBLTYPE "
query += "FROM sysprogress.SYSCOLUMNS_FULL tab_columns "
query += "LEFT JOIN SYSPROGRESS.SYSTABLES_FULL tab_tables "
query += " ON tab_tables.TBL = tab_columns.TBL "
query += " AND tab_tables.OWNER = tab_columns.OWNER "
query += "WHERE "
query += " tab_columns.OWNER = '%s' "%(schema)
if table != None:
query += " AND tab_columns.TBL = '%s' "%(table)
query += "ORDER BY tab_tables.OWNER, tab_tables.TBL"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] in ("decimal", "numeric"):
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
else:
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if self.removeNewLine(row[7]) == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
try:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
except UnicodeDecodeError:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7])
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
logging.debug(result_df)
logging.debug("Executing schemaReader.readTable() - Finished")
return result_df
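    # Usage sketch for readTable() above (illustrative, not part of the original
    # code): given an open jaydebeapi cursor, the call returns one DataFrame row
    # per column of the requested table, with SCHEMA_NAME, TABLE_NAME,
    # SOURCE_COLUMN_NAME, SOURCE_COLUMN_TYPE, IS_NULLABLE and related fields.
    # The cursor, instance and database names below are assumptions only:
    #
    #   columns_df = schema_reader.readTable(cursor, serverType=constant.MYSQL,
    #                                        database="sales", schema="sales",
    #                                        table="customers")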
def readTableKeys(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableKeys()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = "SELECT "
query += " CAST(oParentColDtl.TABLE_SCHEMA AS VARCHAR(4000)) as SCHEMA_NAME, "
query += " CAST(PKnUTable.name AS VARCHAR(4000)) as TABLE_NAME, "
query += " CAST(PKnUKEY.name AS VARCHAR(4000)) as CONSTRAINT_NAME, "
# query += " CAST(PKnUKEY.type_desc AS VARCHAR(4000)) as CONSTRAINT_TYPE, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CAST(PKnUKEYCol.name AS VARCHAR(4000)) as COL_NAME, "
query += " oParentColDtl.DATA_TYPE as COL_DATA_TYPE, "
query += " oParentColDtl.CHARACTER_MAXIMUM_LENGTH as COL_LENGTH, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " PKnUColIdx.key_ordinal as ORDINAL_POSITION "
query += "FROM sys.key_constraints as PKnUKEY "
query += "INNER JOIN sys.tables as PKnUTable "
query += " ON PKnUTable.object_id = PKnUKEY.parent_object_id "
query += "INNER JOIN sys.index_columns as PKnUColIdx "
query += " ON PKnUColIdx.object_id = PKnUTable.object_id "
query += " AND PKnUColIdx.index_id = PKnUKEY.unique_index_id "
query += "INNER JOIN sys.columns as PKnUKEYCol "
query += " ON PKnUKEYCol.object_id = PKnUTable.object_id "
query += " AND PKnUKEYCol.column_id = PKnUColIdx.column_id "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS oParentColDtl "
query += " ON oParentColDtl.TABLE_NAME=PKnUTable.name "
query += " AND oParentColDtl.COLUMN_NAME=PKnUKEYCol.name "
query += "WHERE oParentColDtl.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " and PKnUTable.name = '%s' "%(table)
query += " and PKnUKEY.type_desc = 'PRIMARY_KEY_CONSTRAINT' "
query += "UNION ALL "
query += "SELECT "
query += " CAST(oParentColDtl.TABLE_SCHEMA AS VARCHAR(4000)) as SCHEMA_NAME, "
query += " CAST(oParent.name AS VARCHAR(4000)) as TABLE_NAME, "
query += " CAST(oConstraint.name AS VARCHAR(4000)) as CONSTRAINT_NAME, "
# query += " CONSTRAINT_TYPE = 'FK', "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " CAST(oParentCol.name AS VARCHAR(4000)) as COL_NAME, "
query += " oParentColDtl.DATA_TYPE as COL_NAME_DATA_TYPE, "
query += " oParentColDtl.CHARACTER_MAXIMUM_LENGTH as COL_LENGTH, "
query += " CAST(OBJECT_SCHEMA_NAME(T.[object_id],DB_ID()) AS VARCHAR(4000)) as REFERENCE_SCHEMA_NAME, "
query += " CAST(oReference.name AS VARCHAR(4000)) as REFERENCE_TABLE_NAME, "
query += " CAST(oReferenceCol.name AS VARCHAR(4000)) as REFERENCE_COL_NAME, "
query += " '' as ORDINAL_POSITION "
query += "FROM sys.foreign_key_columns FKC "
query += "INNER JOIN sys.sysobjects oConstraint "
query += " ON FKC.constraint_object_id=oConstraint.id "
query += "INNER JOIN sys.sysobjects oParent "
query += " ON FKC.parent_object_id=oParent.id "
query += "INNER JOIN sys.all_columns oParentCol "
query += " ON FKC.parent_object_id=oParentCol.object_id "
query += " AND FKC.parent_column_id=oParentCol.column_id "
query += "INNER JOIN sys.sysobjects oReference "
query += " ON FKC.referenced_object_id=oReference.id "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS oParentColDtl "
query += " ON oParentColDtl.TABLE_NAME=oParent.name "
query += " AND oParentColDtl.COLUMN_NAME=oParentCol.name "
query += "INNER JOIN sys.all_columns oReferenceCol "
query += " ON FKC.referenced_object_id=oReferenceCol.object_id "
query += " AND FKC.referenced_column_id=oReferenceCol.column_id "
query += "INNER JOIN sys.[tables] AS T ON T.[object_id] = oReferenceCol.[object_id] "
query += "WHERE oParentColDtl.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " and oParent.name = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = row[10]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
query = "SELECT "
query += " DISTINCT CAST (acc.OWNER AS VARCHAR(4000)) AS SCHEMA_NAME, "
query += " CAST (acc.TABLE_NAME AS VARCHAR(4000)) AS TABLE_NAME, "
query += " CAST(ac.CONSTRAINT_NAME AS VARCHAR(4000)) AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CAST ( acc.COLUMN_NAME AS VARCHAR(4000)) AS COL_NAME, "
query += " CAST(atc.data_type AS VARCHAR(4000)) AS COL_NAME_DATA_TYPE, "
query += " atc.DATA_LENGTH, "
query += " '' AS REFERENCE_OWNER_NAME, "
query += " '' AS REFERENCE_TABLE_NAME, "
query += " '' AS REFERENCE_COL_NAME, "
query += " acc.POSITION AS COL_KEY_POSITION, "
query += " atc.DATA_PRECISION, "
query += " atc.CHAR_LENGTH "
query += "FROM ALL_CONSTRAINTS ac "
query += "JOIN ALL_CONS_COLUMNS acc "
query += " ON ac.CONSTRAINT_NAME = acc.CONSTRAINT_NAME "
query += "JOIN all_tab_cols atc "
query += " ON ac.owner = atc.owner "
query += " AND ac.table_name = atc.TABLE_NAME "
query += " AND acc.COLUMN_NAME = atc.COLUMN_NAME "
query += "WHERE ac.CONSTRAINT_TYPE = 'P' "
query += " AND acc.OWNER = '%s' "%(schema)
if table != None:
query += " AND acc.TABLE_NAME = '%s' "%(table)
query += "UNION ALL "
query += "select "
query += " b.owner AS SCHEMA_NAME, "
query += " b.table_name AS TABLE_NAME, "
query += " a.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " b.column_name AS COL_NAME , "
query += " atc.data_type AS COL_NAME_DATA_TYPE, "
query += " atc.DATA_LENGTH, "
query += " c.owner AS REFERENCE_SCHEMA_NAME, "
query += " c.table_name AS REFERENCE_TABLE_NAME, "
query += " c.column_name AS REFERENCE_COL_NAME, "
query += " b.position AS COL_KEY_POSITION, "
query += " atc.DATA_PRECISION, "
query += " atc.CHAR_LENGTH "
query += "from all_cons_columns b "
query += "left join all_cons_columns c "
query += " on b.position = c.position "
query += "left join all_constraints a "
query += " on b.constraint_name = a.constraint_name "
query += " AND a.owner = b.owner "
query += " AND c.constraint_name = a.r_constraint_name "
query += " AND c.owner = a.r_owner "
query += "left join all_tab_cols atc "
query += " on b.owner = atc.owner "
query += " AND b.table_name = atc.table_name "
query += " AND b.column_name = atc.column_name "
query += "where "
query += " a.constraint_type = 'R' "
query += " AND b.OWNER = '%s' "%(schema)
if table != None:
query += " AND b.TABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME,CONSTRAINT_TYPE,CONSTRAINT_NAME,COL_KEY_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = int(row[10])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "SELECT kcu.CONSTRAINT_SCHEMA AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " kcu.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " kcu.column_name AS COL_NAME, "
query += " cols.data_type AS COL_DATA_TYPE, "
query += " cols.character_maximum_length AS COL_MAX_LENGTH, "
query += " kcu.referenced_table_schema AS REFERENCE_TABLE_SCHEMA, "
query += " kcu.referenced_table_name AS REFERENCE_TABLE_NAME, "
query += " kcu.referenced_column_name AS REFERENCE_COL_NAME, "
query += " kcu.ORDINAL_POSITION AS COL_KEY_POSITION "
query += "FROM information_schema.key_column_usage kcu "
query += "left join information_schema.columns cols "
query += " on kcu.table_name = cols.table_name and kcu.column_name = cols.column_name "
query += "WHERE "
query += " kcu.referenced_table_name IS NULL "
query += " AND (CONSTRAINT_NAME='PRIMARY' OR CONSTRAINT_NAME='UNIQUE') "
query += " AND kcu.CONSTRAINT_SCHEMA = '%s' "%(database)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "UNION "
query += "SELECT "
query += " kcu.CONSTRAINT_SCHEMA AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " kcu.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " kcu.column_name AS COL_NAME, "
query += " cols.data_type AS COL_DATA_TYPE, "
query += " cols.character_maximum_length AS COL_MAX_LENGTH, "
query += " kcu.referenced_table_schema AS REFERENCE_TABLE_SCHEMA, "
query += " kcu.referenced_table_name AS REFERENCE_TABLE_NAME, "
query += " kcu.referenced_column_name AS REFERENCE_COL_NAME, "
query += " kcu.ORDINAL_POSITION AS COL_KEY_POSITION "
query += "FROM information_schema.key_column_usage kcu "
query += "left join information_schema.columns cols "
query += " on kcu.referenced_table_name = cols.table_name and referenced_column_name = cols.column_name "
query += "WHERE "
query += " kcu.referenced_table_name IS NOT NULL "
query += " AND kcu.CONSTRAINT_SCHEMA = '%s' "%(database)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "order by schema_name, table_name, CONSTRAINT_TYPE, COL_KEY_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = row[10]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "select "
query += " TRIM(SI.TBCREATOR) as SCHEMA_NAME, "
query += " TRIM(SI.TBNAME) as TABLE_NAME, "
query += " TRIM(SI.NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " TRIM(SC.NAME) as COL_NAME, "
query += " TRIM(SC.COLTYPE) as COL_DATA_TYPE, "
query += " SC.LENGTH as COL_DATA_LENGTH, "
query += " SC.SCALE as COL_DATA_SCALE, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " SI.COLCOUNT as ORDINAL_POSITION "
query += "FROM SYSIBM.SYSINDEXES SI "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON SI.TBCREATOR = SC.TBCREATOR "
query += " AND SI.TBNAME = SC.TBNAME "
query += "WHERE "
query += " SI.COLNAMES = CONCAT('+',SC.NAME) "
query += " AND SI.uniquerule = 'P'"
query += " AND SI.TBCREATOR = '%s' "%(schema)
if table != None:
query += " AND SI.TBNAME = '%s' "%(table)
query += "UNION ALL "
query = "SELECT "
query += " TRIM(R.tabschema) as SCHEMA_NAME, "
query += " TRIM(R.tabname) as TABLE_NAME, "
query += " TRIM(R.constname) as CONSTRAINT_NAME, "
query += " 'F' AS CONSTRAINT_TYPE, "
query += " TRIM(C.COLNAME) as COL_NAME, "
query += " SC.COLTYPE as COL_DATA_TYPE, "
query += " SC.LENGTH as COL_DATA_LENGTH, "
query += " SC.SCALE as COL_DATA_SCALE, "
query += " TRIM(R.reftabschema) as REFERENCE_SCHEMA_NAME, "
query += " TRIM(R.reftabname) as REFERENCE_TABLE_NAME, "
query += " TRIM(Cref.COLNAME) as REFERENCE_COL_NAME, "
query += " C.COLSEQ as ORDINAL_POSITION "
query += "FROM syscat.references R "
query += "LEFT JOIN syscat.keycoluse C "
query += " ON R.constname = C.constname "
query += "LEFT JOIN syscat.keycoluse Cref "
query += " ON R.refkeyname = Cref.constname "
query += " AND C.COLSEQ = Cref.COLSEQ "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON R.tabschema = SC.TBCREATOR "
query += " AND R.tabname = SC.TBNAME "
query += " AND TRIM(SC.NAME)= TRIM(R.FK_COLNAMES) "
query += "WHERE "
query += " R.tabschema = '%s' "%(schema)
if table != None:
query += " AND R.tabname = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[8]
line_dict["REFERENCE_TABLE_NAME"] = row[9]
line_dict["REFERENCE_COL_NAME"] = row[10]
line_dict["COL_KEY_POSITION"] = int(row[11])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
query = "SELECT "
query += " TRIM(SPK.TABLE_SCHEM) as SCHEMA_NAME, "
query += " TRIM(SPK.TABLE_NAME) as TABLE_NAME, "
query += " TRIM(SPK.PK_NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " TRIM(SC.COLUMN_NAME) as COL_NAME, "
query += " SC.TYPE_NAME as COL_DATA_TYPE, "
query += " SC.COLUMN_SIZE as COL_DATA_LENGTH, "
query += " SC.DECIMAL_DIGITS as COL_DATA_SCALE, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " SPK.KEY_SEQ as ORDINAL_POSITION "
query += "FROM SYSIBM.SQLPRIMARYKEYS SPK "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON SPK.TABLE_CAT = SC.TABLE_CAT "
query += " AND SPK.TABLE_SCHEM = SC.TABLE_SCHEM "
query += " AND SPK.TABLE_NAME = SC.TABLE_NAME "
query += " AND SPK.COLUMN_NAME=SC.COLUMN_NAME "
query += "WHERE "
query += " SPK.TABLE_SCHEM = '%s' "%(schema)
if table != None:
query += " AND SPK.TABLE_NAME = '%s' "%(table)
query += "UNION ALL "
query += "SELECT "
query += " TRIM(SFK.FKTABLE_SCHEM) as SCHEMA_NAME, "
query += " TRIM(SFK.FKTABLE_NAME) as TABLE_NAME, "
query += " TRIM(SFK.FK_NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " TRIM(SFK.FKCOLUMN_NAME) as COL_NAME, "
query += " SC.TYPE_NAME as COL_DATA_TYPE, "
query += " SC.COLUMN_SIZE as COL_DATA_LENGTH, "
query += " SC.DECIMAL_DIGITS as COL_DATA_SCALE, "
query += " SFK.PKTABLE_SCHEM as REFERENCE_SCHEMA_NAME, "
query += " SFK.PKTABLE_NAME as REFERENCE_TABLE_NAME, "
query += " SFK.PKCOLUMN_NAME as REFERENCE_COL_NAME, "
query += " SFK.KEY_SEQ as ORDINAL_POSITION "
query += "FROM SYSIBM.SQLFOREIGNKEYS SFK "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON SFK.FKTABLE_CAT = SC.TABLE_CAT "
query += " AND SFK.FKTABLE_SCHEM = SC.TABLE_SCHEM "
query += " AND SFK.FKTABLE_NAME = SC.TABLE_NAME "
query += " AND SFK.FKCOLUMN_NAME = SC.COLUMN_NAME "
query += "WHERE "
query += " SFK.FKTABLE_SCHEM = '%s' "%(schema)
if table != None:
query += " AND SFK.FKTABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[8]
line_dict["REFERENCE_TABLE_NAME"] = row[9]
line_dict["REFERENCE_COL_NAME"] = row[10]
line_dict["COL_KEY_POSITION"] = int(row[11])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.POSTGRESQL:
query = "SELECT "
query += " distinct kcu.constraint_schema AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " c.conname AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'PRIMARY KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) "
query += " END AS COL_NAME, "
query += " '' AS REFERENCE_SCHEMA_NAME, "
query += " '' AS REFERENCE_TABLE_NAME, "
query += " '' AS REFERENCE_COL_NAME "
query += "FROM pg_catalog.pg_constraint c "
query += "LEFT JOIN information_schema.key_column_usage kcu "
query += " ON c.conname = kcu.constraint_name "
query += "LEFT JOIN information_schema.tables ist "
query += " ON ist.table_schema = kcu.constraint_schema "
query += " AND ist.table_name = kcu.table_name "
query += "WHERE "
query += " c.contype = 'p' "
query += " AND pg_get_constraintdef(c.oid) LIKE 'PRIMARY KEY %' "
query += " AND ist.table_catalog = '%s' "%(database)
query += " AND kcu.constraint_schema ='%s' "%(schema)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "UNION "
query += "SELECT "
query += " kcu.constraint_schema AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " c.conname AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) "
query += " END AS COL_NAME, "
query += " '' AS REFERENCE_SCHEMA_NAME,"
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), position(' REFERENCES ' in pg_get_constraintdef(c.oid))+12, position('(' in substring(pg_get_constraintdef(c.oid), 14))-position(' REFERENCES ' in pg_get_constraintdef(c.oid))+1) "
query += " END AS REFERENCE_TABLE_NAME, "
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14, position(')' in substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14))-1) "
query += " END AS REFERENCE_COL_NAME "
query += "FROM pg_catalog.pg_constraint c "
query += "LEFT JOIN information_schema.key_column_usage kcu "
query += " ON c.conname = kcu.constraint_name "
query += "LEFT JOIN information_schema.tables ist "
query += " ON ist.table_schema=kcu.constraint_schema "
query += " AND ist.table_name=kcu.table_name "
query += "WHERE "
query += " c.contype = 'f' AND contype IN ('f', 'p') "
query += " AND pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " AND ist.table_catalog = '%s' "%(database)
query += " AND kcu.constraint_schema ='%s' "%(schema)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME,CONSTRAINT_TYPE "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
schemaName = row[0]
tableName = row[1]
constraintName = row[2]
constraintType = row[3]
colName = row[4].strip('"')
refSchemaName = row[5]
refTableName = row[6].strip('"')
refColName = row[7].strip('"')
colKeyPosition = 1
if constraintType == constant.FOREIGN_KEY:
if refSchemaName == "" and "." in refTableName:
refArray = refTableName.split(".")
refSchemaName = refArray[0]
refTableName = refArray[1]
if refSchemaName == "":
refSchemaName = "public"
colNameList = colName.split(",")
refColNameList = refColName.split(",")
for i, column in enumerate(colNameList):
colName = colNameList[i]
refColName = refColNameList[i]
if table == None:
line_dict["SCHEMA_NAME"] = schemaName
line_dict["TABLE_NAME"] = tableName
line_dict["CONSTRAINT_NAME"] = constraintName
line_dict["CONSTRAINT_TYPE"] = constraintType
line_dict["COL_NAME"] = colName
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = refSchemaName
line_dict["REFERENCE_TABLE_NAME"] = refTableName
line_dict["REFERENCE_COL_NAME"] = refColName
line_dict["COL_KEY_POSITION"] = colKeyPosition
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
# In some cases, we get duplicate Foreign Keys. This removes all duplicate entries
result_df.drop_duplicates(keep="first", inplace=True)
logging.debug(result_df)
logging.debug("Executing schemaReader.readKeys() - Finished")
return result_df
def readTableIndex(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableColumns()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = ""
query += "select i.name,"
query += " i.type, "
query += " i.is_unique, "
query += " col.name, "
query += " ic.index_column_id, "
query += " col.is_nullable "
query += "from sys.objects t "
query += " inner join sys.indexes i "
query += " on t.object_id = i.object_id "
query += " inner join sys.index_columns ic "
query += " on ic.object_id = t.object_id "
query += " and ic.index_id = i.index_id "
query += " inner join sys.columns col "
query += " on col.object_id = t.object_id "
query += " and col.column_id = ic.column_id "
query += "where schema_name(t.schema_id) = '%s' "%(schema)
query += "and t.name = '%s' "%(table)
query += "order by i.object_id, i.index_id"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
uniqueDict = { 0: "Not unique", 1: "Unique" }
indexTypeDict = {
1: "Clustered index",
2: "Nonclustered unique index",
3: "XML index",
4: "Spatial index",
5: "Clustered columnstore index",
6: "Nonclustered columnstore index",
7: "Nonclustered hash index"
}
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
line_dict["Name"] = row[0]
line_dict["Type"] = indexTypeDict.get(row[1], row[1])
line_dict["Unique"] = uniqueDict.get(int(row[2]), int(row[2]))
line_dict["Column"] = row[3]
line_dict["ColumnOrder"] = row[4]
line_dict["IsNullable"] = row[5]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
query = ""
query += "SELECT "
query += " ai.index_name, "
query += " ai.index_type, "
query += " ai.uniqueness, "
query += " aic.column_name, "
query += " aic.column_position, "
query += " atc.nullable "
query += "FROM all_indexes ai "
query += "INNER JOIN all_ind_columns aic "
query += " ON ai.owner = aic.index_owner "
query += " AND ai.index_name = aic.index_name "
query += "INNER JOIN all_tab_columns atc "
query += " ON ai.owner = atc.owner "
query += " AND ai.table_name = atc.table_name "
query += " AND aic.column_name = atc.column_name "
query += "WHERE ai.owner = UPPER('%s') "%(schema)
query += " AND ai.table_name = UPPER('%s') "%(table)
query += "ORDER BY aic.column_position"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
line_dict["Name"] = row[0]
line_dict["Type"] = row[1].capitalize()
if row[2] == "NONUNIQUE":
line_dict["Unique"] = "Not unique"
else:
line_dict["Unique"] = row[2].capitalize()
line_dict["Column"] = row[3]
line_dict["ColumnOrder"] = row[4]
if row[5] == "N":
line_dict["IsNullable"] = 0
else:
line_dict["IsNullable"] = 1
rows_list.append(line_dict)
            result_df = pd.DataFrame(rows_list)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.stats.outliers_influence import variance_inflation_factor
class DimensionalityReduction:
def __init__(self):
pass
def fit(self, df, target, label_encoding=False):
self.df = df.copy()
self.target = target
self.missing = []
self.multico = []
self.var = []
if label_encoding == True:
self.cat = [i for i in df.columns if i not in df.describe().columns]
for c in self.cat:
self.df[c].replace(self.df[c].dropna().unique(), range(self.df[c].nunique()), inplace=True)
def plot(self, plot='dash'):
if plot == 'dash':
fig, ax = plt.subplots(2, 2, figsize=(12, 10))
ax = ax.flatten()
plt.style.use('fivethirtyeight')
            plt.suptitle('Dimensionality Reduction Dashboard')
#------------------------------%Missing------------------------------
if plot == '%missing' or plot == 'dash':
desc = self.df.describe().T
desc['%missing'] = (1 - (desc['count']/len(self.df))) * 100
if plot == 'dash':
sns.barplot(
data=desc.sort_values('%missing', ascending=False),
x='%missing',
y=desc.sort_values('%missing', ascending=False).index,
ax=ax[0], color='orange').set(title='%Missing'.title(), xlabel=' ')
if plot != 'dash':
fig, ax = plt.subplots(figsize=(14, 6))
sns.barplot(
data=desc.sort_values('%missing', ascending=False),
x='%missing',
y=desc.sort_values('%missing', ascending=False).index,
color='orange').set(title='%Missing'.title(), xlabel=' ')
self.missing = desc['%missing'][desc['%missing'] > 95]
#------------------------------Amount of Variation------------------------------
if plot == 'var' or plot == 'dash':
std_df = self.df.drop(self.target, axis=1)
std_df = (std_df.describe().T['std'] - std_df.min())/ (std_df.max() - std_df.min())
std_df.sort_values(ascending=False, inplace=True)
if plot == 'dash':
sns.lineplot(
data=std_df,
x=[i + 1 for i in range(len(std_df.index))],
y=std_df.values,
linewidth=2, ax=ax[1]).set(title='Amount of Variation'.title(), xlabel=' ')
if plot != 'dash':
fig, ax = plt.subplots(figsize=(14, 6))
sns.lineplot(
data=std_df,
x=[i + 1 for i in range(len(std_df.index))],
y=std_df.values,
linewidth=2).set(title='Amount of Variation'.title(), xlabel=' ')
#------------------------------Multicolinearity------------------------------
if plot == 'multico' or plot == 'dash':
eig=[]
for i,j in zip(self.df.isnull().sum().index, self.df.isnull().sum()):
if j == 0:
eig.append(i) # Selecting columns that do not contain any NaNs and inf. values
eigen_matrix = self.df[eig].corr().iloc[1:, 1:]
w, v = np.linalg.eig(eigen_matrix) # eigen values & eigen vectors
CI=np.round((w.max()/w)**0.5) # Condition Index
CI_index = ['U' + str(i) + ' = ' + str(j) for i, j in zip(range(len(eigen_matrix.columns)),CI)]
Multicolinearity_matrix=round(pd.DataFrame(v,columns=[eigen_matrix.columns],index=CI_index), 1)
Multicolinearity_matrix.sort_index(level=1,ascending=False,inplace=True)
cmap = sns.diverging_palette(0, 230, 90, 60, as_cmap=True)
if plot == 'dash':
sns.heatmap(
Multicolinearity_matrix,
cmap=cmap,
annot=False,
ax=ax[2]).set(title='Condition Index', xlabel=' ', ylabel=' ')
if plot != 'dash':
fig, ax = plt.subplots(figsize=(14, 6))
sns.heatmap(
Multicolinearity_matrix,
cmap=cmap,
annot=True).set(title='Condition Index', xlabel=' ', ylabel=' ')
self.multico = [feat for idx, feat in zip(CI, self.df.corr().iloc[1:, 1:].columns) if idx > 30]
#-------------------------------------------VIF-------------------------------------------------
# the independent variables set
X = self.df.drop(self.target, axis=1)
# VIF dataframe
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
# calculating VIF for each feature
VIF = {feat:variance_inflation_factor(X.dropna().values, i) for i, feat in zip(range(len(X.columns)), X.columns)}
        self.VIF = pd.DataFrame(VIF, index=['VIF'])
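
# Usage sketch (illustrative, not part of the original notebook): the class is
# driven by fit() followed by plot(). 'df' and the target column name below are
# placeholders for the caller's own data.
#
#   dr = DimensionalityReduction()
#   dr.fit(df, target='price', label_encoding=True)
#   dr.plot('dash')       # or '%missing', 'var', 'multico' for a single panel
#   print(dr.missing)     # columns with more than 95% missing values
#   print(dr.VIF)         # variance inflation factor per feature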
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
    tm.assert_frame_equal(last_chunk, expected[3:])
# encoding: utf-8
##################################################
# This script shows how to collect data from remote sources and create line plots
# Find extra documentation for the source code here:
# https://github.com/diegopajarito/COVID19_datavis
# Note: the project does not have changes after mid 2019
##################################################
#
##################################################
# Author: <NAME>
# Copyright: Copyright 2021, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: <NAME>
# Email: <EMAIL>
# Status: development
##################################################
# We need to import pandas library
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Setting up data sources (files and web)
gdp_growth = pd.read_csv('../data/WB_GDP_growth_anual.csv', skiprows=4)
jhu_link_confirmed = 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
jhu_link_deaths = 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
confirmed = pd.read_csv(jhu_link_confirmed)
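
# --- Illustrative sketch, not part of the original script ---
# A minimal set of line plots from the confirmed-cases table. It assumes the
# usual JHU layout ('Province/State', 'Country/Region', 'Lat', 'Long', followed
# by one column per date) and an arbitrary example list of countries.
example_countries = ['Spain', 'Italy', 'Germany']
confirmed_by_country = confirmed.drop(columns=['Province/State', 'Lat', 'Long']).groupby('Country/Region').sum()
dates = pd.to_datetime(confirmed_by_country.columns)
for country in example_countries:
    plt.plot(dates, confirmed_by_country.loc[country], label=country)
plt.ylabel('Confirmed cases')
plt.legend()
plt.show()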
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
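# Illustrative sketch (not part of the original source): __new__ above
# dispatches on the inferred dtype of ``data``, so the concrete subclass
# returned depends on the input values.
#
#     >>> pd.Index([1, 2, 3])
#     Int64Index([1, 2, 3], dtype='int64')
#     >>> pd.Index(['a', 'b'])
#     Index(['a', 'b'], dtype='object')
#     >>> type(pd.Index([(1, 'a'), (2, 'b')])).__name__   # all tuples
#     'MultiIndex'
#     >>> type(pd.Index(pd.date_range('2018-01-01', periods=2))).__name__
#     'DatetimeIndex'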
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
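# Illustrative sketch (not part of the original source): ``is_`` compares the
# shared ``_id`` object, so a view of the same data compares True while an
# element-wise equal but independently constructed Index does not.
#
#     >>> idx = pd.Index([1, 2, 3])
#     >>> idx.is_(idx.view())
#     True
#     >>> idx.is_(pd.Index([1, 2, 3]))
#     False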
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | ------------- | ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
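# Illustrative sketch (not part of the original source): the distinction in
# the table above is easiest to see on a CategoricalIndex, where ``.values``
# keeps the Categorical while ``._ndarray_values`` exposes the integer codes
# (outputs sketched from the table above).
#
#     >>> ci = pd.CategoricalIndex(['a', 'b', 'a'])
#     >>> ci.values
#     [a, b, a]
#     Categories (2, object): [a, b]
#     >>> ci._ndarray_values
#     array([0, 1, 0], dtype=int8)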
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
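# Illustrative sketch (not part of the original source): entries where the
# condition is False are replaced by ``other`` (NaN by default), which can
# force a dtype change for integer indexes.
#
#     >>> idx = pd.Index([1, 2, 3, 4])
#     >>> idx.where(idx > 2)
#     Float64Index([nan, nan, 3.0, 4.0], dtype='float64')
#     >>> idx.where(idx > 2, 0)
#     Int64Index([0, 0, 3, 4], dtype='int64')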
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
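# Illustrative sketch (not part of the original source): integer-like data
# tries the signed path first and only falls back to unsigned when the
# values overflow int64.
#
#     >>> pd.Index([1, 2, 3])
#     Int64Index([1, 2, 3], dtype='int64')
#     >>> pd.Index([1, 2, 2 ** 63])              # too large for int64
#     UInt64Index([1, 2, 9223372036854775808], dtype='uint64')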
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
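# Illustrative sketch (not part of the original source): the resulting Series
# carries the index both as its index and as its values, which makes it a
# convenient labels-to-labels mapping.
#
#     >>> idx = pd.Index(['a', 'b', 'c'])
#     >>> idx.to_series()
#     a    a
#     b    b
#     c    c
#     dtype: object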
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
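# Illustrative sketch (not part of the original source): the name is only
# preserved when every input shares the same name, and mixed dtypes fall
# back to an object Index.
#
#     >>> pd.Index([1, 2], name='x').append(pd.Index([3, 4], name='x'))
#     Int64Index([1, 2, 3, 4], dtype='int64', name='x')
#     >>> pd.Index([1, 2]).append(pd.Index(['a', 'b']))
#     Index([1, 2, 'a', 'b'], dtype='object')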
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 is regarded as NA. If Index doesn't hold NA, raise ValueError
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
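# Illustrative sketch (not part of the original source): with the default
# ``allow_fill=True`` and a non-None ``fill_value``, an index of -1 marks a
# missing value instead of meaning "last element".
#
#     >>> idx = pd.Index([1.0, 2.0, 3.0])
#     >>> idx.take([0, -1])
#     Float64Index([1.0, 3.0], dtype='float64')
#     >>> idx.take([0, -1], fill_value=np.nan)
#     Float64Index([1.0, nan], dtype='float64')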
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this condition if hasnans is checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
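# Illustrative sketch (not part of the original source): entries where the
# mask is True are replaced by ``value`` in a new Index; the original is
# left untouched because Index objects are immutable.
#
#     >>> idx = pd.Index([1, 2, 3])
#     >>> idx.putmask(idx > 1, 9)
#     Int64Index([1, 9, 9], dtype='int64')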
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
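# Illustrative sketch (not part of the original source): ``equals`` only
# compares the elements, while ``identical`` additionally requires matching
# metadata (name) and matching Index subclass.
#
#     >>> idx1 = pd.Index([1, 2, 3], name='x')
#     >>> idx2 = pd.Index([1, 2, 3], name='y')
#     >>> idx1.equals(idx2)
#     True
#     >>> idx1.identical(idx2)
#     False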
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
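# Illustrative sketch (not part of the original source): on a sorted index
# ``asof`` pads backwards to the closest label at or before the argument,
# and returns NaN when every label lies after it.
#
#     >>> idx = pd.Index([10, 20, 30])
#     >>> idx.asof(25)
#     20
#     >>> idx.asof(5)
#     nan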
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name meaning
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects, sorting if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
---------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
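Examples
--------
A small sketch; level names and values are arbitrary.
>>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
... names=['x', 'y'])
>>> mi.droplevel('x')
Index(['a', 'b'], dtype='object', name='y')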
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique as appropriate.
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
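Examples
--------
A minimal sketch with arbitrary grouping values.
>>> idx = pd.Index(['a', 'b', 'c'])
>>> groups = idx.groupby(np.array([1, 2, 1]))
>>> groups[1]
Index(['a', 'c'], dtype='object')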
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
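Examples
--------
An illustrative example; the mapping function is arbitrary.
>>> idx = pd.Index([1, 2, 3])
>>> idx.map(lambda x: x * 10)
Int64Index([10, 20, 30], dtype='int64')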
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
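Examples
--------
A small illustration with arbitrary labels.
>>> idx = pd.Index(['car', 'bike', 'train'])
>>> idx.reindex(['car', 'boat'])
(Index(['car', 'boat'], dtype='object'), array([ 0, -1]))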
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
Returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
---------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (OverflowError, ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
If we are a positional indexer, validate that we have an
appropriately typed bound (it must be an integer).
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
def _get_loc_only_exact_matches(self, key):
"""
This is overridden on subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
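Examples
--------
Illustrative only; labels are arbitrary.
>>> idx = pd.Index(list('abcd'))
>>> idx.get_slice_bound('b', 'left', 'loc')
1
>>> idx.get_slice_bound('b', 'right', 'loc')
2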
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
Notes
-----
This method only works if the index is monotonic or unique.
Examples
---------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
See Also
--------
Index.get_loc : Get location for a single label
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
def delete(self, loc):
"""
Make new Index with passed location(s) deleted
Returns
-------
new_index : Index
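Examples
--------
Illustrative example (labels are arbitrary).
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')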
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
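Examples
--------
Illustrative example (labels are arbitrary).
>>> idx = pd.Index(['a', 'c'])
>>> idx.insert(1, 'b')
Index(['a', 'b', 'c'], dtype='object')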
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
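Examples
--------
Illustrative example (labels are arbitrary).
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.drop(['a'])
Index(['b', 'c'], dtype='object')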
"""
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'labels %s not contained in axis' % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
_index_shared_docs['index_unique'] = (
"""
Return unique values in the index. Uniques are returned in order
of appearance, this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex)
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = super(Index, self).unique()
return self._shallow_copy(result)
def drop_duplicates(self, keep='first'):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : equivalent method on Series
DataFrame.drop_duplicates : equivalent method on DataFrame
Index.duplicated : related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super(Index, self).drop_duplicates(keep=keep)
def duplicated(self, keep='first'):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Which occurrence in each set of duplicates, if any, should not be
marked as a duplicate; ``False`` marks all occurrences.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
Returns
-------
numpy.ndarray
See Also
--------
pandas.Series.duplicated : Equivalent method on pandas.Series
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Index.drop_duplicates : Remove duplicate values from Index
"""
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care about metadata other than name, because
# the index can't have a freq if it contains NaNs
return Index(result, name=self.name)
return self._shallow_copy()
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
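Examples
--------
A small illustration with arbitrary float values.
>>> pd.Index([1.0, np.nan, 3.0]).dropna()
Float64Index([1.0, 3.0], dtype='float64')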
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
def _evaluate_with_timedelta_like(self, other, op):
# Timedelta knows how to operate with np.array, so dispatch to that
# operation and then wrap the results
other = Timedelta(other)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op == divmod:
return Index(result[0], **attrs), Index(result[1], **attrs)
return Index(result, **attrs)
def _evaluate_with_datetime_like(self, other, op):
raise TypeError("can only perform ops with datetime like values")
def _evaluate_compare(self, other, op):
raise com.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.gt, cls)
cls.__le__ = _make_comparison_op(operator.le, cls)
cls.__ge__ = _make_comparison_op(operator.ge, cls)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
cls.__add__ = make_invalid_op('__add__')
cls.__radd__ = make_invalid_op('__radd__')
cls.__iadd__ = make_invalid_op('__iadd__')
cls.__sub__ = make_invalid_op('__sub__')
cls.__rsub__ = make_invalid_op('__rsub__')
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
| Timestamp(max_ts_us + one_us) | pandas.Timestamp |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
import argparse
rate = "0.5" # 默认为6:4的正负样本比例,若要改为1:1则取rate=“0.5”
class SGD:
def __init__(self, trainfile, validfile, testfile):
super(SGD, self).__init__()
train: pd.DataFrame = pd.read_csv(trainfile)
train: pd.DataFrame = train[train['label'].notna()]
valid: pd.DataFrame = | pd.read_csv(validfile) | pandas.read_csv |
from helper import find_csv,find_sigma
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import os
import re
import numpy as np
linewidth = 4
fontsize = 20
figsize = [10,8]
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : fontsize }
plt.rc('font', **font) # pass in the font dict as kwargs
from matplotlib import cycler
#plt.rcParams['axes.prop_cycle'] = cycler(linestyle=['-','-.','--',':','-','-.','--',':'],color= ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728','#9467bd', '#8c564b', '#e377c2', '#7f7f7f'])
from helper import format_func_dimensionla_potential
def getTafelRegion(CV,start=0.05, end = 0.15):
df = pd.read_csv(CV,header=None)
cv_forward = df[:int(len(df)/2)]
min_current = cv_forward[1].min()
min_current_index = cv_forward[1].idxmin()
cv_before_peak = cv_forward.iloc[:min_current_index]
cv_tafel = cv_before_peak[(cv_before_peak[1]<start*min_current)&(cv_before_peak[1]>end*min_current)]
flux = pd.DataFrame(cv_tafel[1])
cv_tafel[1] = np.log(-cv_tafel[1])
x = pd.DataFrame(cv_tafel[0])
y= pd.DataFrame(cv_tafel[1])
#X Theta
#Y Log(J)
Gradient = pd.DataFrame(columns=['Theta','flux','LnFlux','Gradient'])
Gradient['Theta'] = x[0]
Gradient['LnFlux'] = y
Gradient['flux'] = flux
for index, value in enumerate(Gradient['Theta']):
if index < len(Gradient['Theta'])-2:
Gradient['Gradient'].iloc[index] = (Gradient['LnFlux'].iloc[index+1] - Gradient['LnFlux'].iloc[index])/(Gradient['Theta'].iloc[index+1]-Gradient['Theta'].iloc[index])
#print(index,Gradient['Gradient'].iloc[index])
else:
Gradient['Gradient'].iloc[index] = Gradient['Gradient'].iloc[index-1]
Gradient['Gradient'] = -Gradient['Gradient']
Gradient_name = 'Gradient' + CV
Gradient.to_csv(Gradient_name,index=False)
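# Minimal usage sketch (the CSV file name below is assumed, for illustration only):
#
#   getTafelRegion('cv_scan.csv', start=0.05, end=0.15)
#   # writes 'Gradientcv_scan.csv' with columns Theta, flux, LnFlux and Gradient,
#   # where Gradient is the local slope -d(ln|J|)/d(Theta) in the 5-15 % window of
#   # the peak current, i.e. the apparent transfer coefficient plotted below.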
def plotTafelRegion(CV):
if 'One Electron Reduction' in CV:
Gradient = pd.read_csv(CV)
offset = Gradient.iloc[0,0]
Gradient['Theta'] = Gradient['Theta'] - offset
Transfer_coefficient_at_5pct = Gradient['Gradient'][0]
Transfer_coefficient_at_30pct = Gradient['Gradient'].iloc[-1]
Gradient.plot(x='Theta',y='Gradient',ax=ax,linewidth = linewidth, label = f'One Electron Reduction',ls='--',color='k')
else:
Gradient = | pd.read_csv(CV) | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is | PeriodDtype('period[3D]') | pandas.core.dtypes.dtypes.PeriodDtype |
from __future__ import print_function # In python 2.7
from flask import Flask, session, render_template, make_response, jsonify, request, send_from_directory, g, url_for
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
import json
from sklearn.naive_bayes import GaussianNB
import numpy as np
import pickle as pkl
from sklearn.externals import joblib
import pandas as pd
from decimal import Decimal
from werkzeug.utils import secure_filename
import logging
from flask import escape
from flask_expects_json import expects_json
import math
import operator
from more_itertools import unique_everseen
from uuid import uuid4
import difflib
import os
import uuid
import requests
import sys
from helper import vector_cos5, isValid, isInDB, get_model, findFeatures, findSymptom, findDisease, syInData, getDetails
with open('en_Labels.js') as dataFile:
json_data = dataFile.read()
data = json.loads(json_data)
nameToCUI = {}
for i in data:
nameToCUI[i['label'].lower()] = i['value']
app = Flask(
__name__
)
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
def get_model():
model_file = 'data/all-files-for-ml/' + 'all_mnb' + '.pkl'
mnb = joblib.load(open(model_file, 'rb'))
data = | pd.read_csv("data/all-files-for-ml/all_x.csv") | pandas.read_csv |
# coding=utf-8
# /usr/bin/env python
'''
Author: wenqiangw
Email: <EMAIL>
Date: 2020-04-23 17:59
Desc:
        Chi-square binning (ChiMerge): iteratively find the pair of adjacent bins whose merge gives the smallest chi-square value and merge them.
        Continuous features with many distinct observed values can first be pre-binned by equal frequency.
        from https://zhuanlan.zhihu.com/p/115267395
        1. Initialization: sort by attribute value (non-continuous features first need a numeric transform, e.g. to the bad-sample rate, before sorting), then treat each distinct attribute value as its own group.
        2. Merging:
        (1) For every pair of adjacent groups, compute the chi-square value.
        (2) Merge the pair of adjacent groups with the smallest chi-square value into one group.
        (3) Repeat (1) and (2) until no computed chi-square value is below the preset threshold, or the number of groups satisfies the stopping condition (e.g. at least 5 and at most 8 groups) and every bin contains both positive and negative labels.
>>> df = pd.DataFrame([[1,2,3,4,5,5,5,3,3,3,2,1,5,7],[1,1,0,0,0,0,0,1,1,0,1,1,1,1]]).T
>>> df.columns=['field','label']
>>> df_field, df_label = df['field'],df['label']
>>> aa = chi_blocks(df_field, df_label, box_num=5, dfree=4, cf=0.1)
>>> print(aa)
[-inf, 4.0, 5.0, inf]
'''
import pandas as pd
import numpy as np
from scipy.stats import chi2
from pynovice.score_card.src.data_boxing import frequence_blocks
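# Illustrative sketch, not part of the original module: the per-pair chi-square value
# that drives the merge step described in the module docstring, computed from the
# observed label counts of two adjacent bins. The helper name and the toy input shape
# are assumptions made for illustration only.
def _chi2_adjacent_bins_sketch(counts_a, counts_b):
    # counts_a / counts_b: observed counts per label class for two adjacent bins,
    # e.g. [n_label0, n_label1]
    observed = np.array([counts_a, counts_b], dtype=float)
    row_tot = observed.sum(axis=1, keepdims=True)
    col_tot = observed.sum(axis=0, keepdims=True)
    expected = row_tot * col_tot / observed.sum()   # expected counts under independence
    mask = expected > 0                             # empty classes contribute nothing
    return (((observed - expected) ** 2)[mask] / expected[mask]).sum()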
def get_chi2(df_field,df_label):
'''
    Compute the chi-square statistics.
    :param df_field: Series, the field to be binned
    :param df_label: Series, the sample labels
    :return:
        observed frequencies per bin, expected frequencies, chi-square values
'''
df_field.reset_index(drop=True,inplace=True)
df_label.reset_index(drop=True,inplace=True)
df_concat = pd.concat([df_field,df_label],axis=1)
df_concat.columns = ['field','label']
df_concat.sort_values(by='field',ascending= True,inplace=True)
dict_label_ratio = dict(df_label.value_counts() / df_label.count())
_group = df_concat.groupby('field')
group_cnt = []
group_except_cnt = []
for i,j in dict_label_ratio.items():
iterm = _group.agg([(i, lambda x: sum(x == i))])
group_cnt.append(iterm)
iterm_expect = _group.agg([(i, lambda x: len(x)*j)])
group_except_cnt.append(iterm_expect)
df_pos_cnt = | pd.concat(group_cnt,axis=1) | pandas.concat |
# import necessary libraries
import pandas as pd
import os
import matplotlib.pyplot as plt
from itertools import combinations
from collections import Counter
def get_city(address):
return address.split(',')[1]
def get_state(address):
return address.split(',')[2].split(' ')[1]
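# For reference (the example address is made up): with an address formatted like
# '917 1st St, Dallas, TX 75001', get_city returns ' Dallas' (note the leading space
# kept by the split) and get_state returns 'TX'.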
# plt.style.use('fivethirtyeight')
# Merging 12 months of sales data into a single file
data1 = pd.read_csv('D:/Phyton Code/Contoh dari Github/Pandas-Data-Science-\
Tasks-master/SalesAnalysis/Sales_Data/Sales_April_2019.csv')
all_data = pd.DataFrame()
files = [file for file in os.listdir('D:/Phyton Code/Contoh dari Github/\
Pandas-Data-Science-Tasks-master/SalesAnalysis/Sales_Data')]
for file in files:
temp = | pd.read_csv('D:/Phyton Code/Contoh dari Github/Pandas-Data-Science-\
Tasks-master/SalesAnalysis/Sales_Data/' + file) | pandas.read_csv |
# Copyright 2021 ABSA Group Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from copy import deepcopy
from datetime import date
from decimal import Decimal
from typing import Optional
import pandas as pd
import pytest
from pydantic import BaseModel
from py2k.creators import PandasModelCreator
from py2k.record import PandasToRecordsTransformer, KafkaRecord
class _TestData(BaseModel):
name: str
id: int
value: float
decimal_val: Decimal
bool_val: bool
def __str__(self):
return f"Name = ${self.name}, " \
f"Id = ${self.id}, " \
f"Value = ${self.value}, " \
f"Decimal Val = ${self.decimal_val}," \
f" bool_val = ${self.bool_val}"
def __hash__(self):
return hash(self.__str__())
def __eq__(self, other):
return self.__hash__() == other.__hash__()
class _TestDataWithOptional(BaseModel):
name: Optional[str]
id: int
def __str__(self):
return f"Name = ${self.name}, Id = ${self.id}"
def __hash__(self):
return hash(self.__str__())
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def test_return_empty_if_df_empty(test_df):
with pytest.warns(UserWarning) as record:
empty_df = test_df.head(0)
model = PandasToRecordsTransformer(test_df, 'TestModel')
created_df = model.from_pandas(empty_df)
assert len(created_df) == 0
expected_message = "Unable to create kafka model from an empty dataframe."
assert len(record) == 1
assert record[0].message.args[0] == expected_message
def test_dynamically_convert_from_pandas(test_data, test_df):
model = PandasToRecordsTransformer(test_df, 'TestModel')
records = model.from_pandas(test_df)
_assert_records(records, test_data)
def test_convert_from_constructed_dataframe_by_default(test_data, test_df):
model = PandasToRecordsTransformer(test_df, 'TestModel')
records = model.from_pandas()
_assert_records(records, test_data)
def test_fields_names_and_titles_are_the_same(test_df):
model = PandasToRecordsTransformer(test_df, 'TestModel')
records = model.from_pandas(test_df)
for name, definition in records[0].__fields__.items():
assert name == definition.name
def test_use_default_defaults_if_none_informed(test_df):
model = PandasToRecordsTransformer(test_df, 'TestModel')
records = model.from_pandas(test_df)
# expected defaults by field
expected = {
field: PandasModelCreator._SCHEMA_TYPES_DEFAULTS.get(type(value))
for field, value in records[0].dict().items()}
_assert_schema_defaults(records[0], expected)
def test_all_defaults_from_field_name(test_df):
expected = {"name": "default name", "id": 8,
"value": 8.8, "decimal_val": 8.8, "bool_val": True}
model = PandasToRecordsTransformer(test_df, 'TestModel',
fields_defaults=expected)
records = model.from_pandas(test_df)
_assert_schema_defaults(records[0], expected)
def test_some_defaults_from_field_name(test_df):
local_defaults = {"name": "default name", "value": 8.8,
"decimal_val": Decimal(12), "bool_val": True}
model = PandasToRecordsTransformer(test_df, 'TestModel',
fields_defaults=local_defaults)
records = model.from_pandas(test_df)
expected = {**local_defaults,
"id": PandasModelCreator._SCHEMA_TYPES_DEFAULTS.get(int)}
_assert_schema_defaults(records[0], expected)
def test_all_defaults_from_field_type(test_df):
expected = {str: "default name", int: 8, float: 8.8, bool: True}
model = PandasToRecordsTransformer(test_df, 'TestModel',
types_defaults=expected)
records = model.from_pandas(test_df)
_assert_schema_defaults(records[0], expected, by_name=False)
def test_some_defaults_from_field_type(test_df):
local_defaults = {int: 8, float: 8.8, bool: True}
model = PandasToRecordsTransformer(test_df, 'TestModel',
types_defaults=local_defaults)
records = model.from_pandas(test_df)
expected = {**local_defaults,
str: PandasModelCreator._SCHEMA_TYPES_DEFAULTS.get(str)}
_assert_schema_defaults(records[0], expected, by_name=False)
def test_optional_fields_specified_by_param(test_df_with_nones):
model = PandasToRecordsTransformer(test_df_with_nones, 'TestModel',
optional_fields=['name'])
records = model.from_pandas(test_df_with_nones)
expected = {"name": Optional[str], "id": int}
_assert_schema_types(records[0], expected)
_raw_types_test_cases = [
(10, int),
("aa", str),
(12.4, float),
(True, bool),
(Decimal(10.), float),
(date(2020, 1, 10), date),
(pd.Timestamp('2020-01-01T12'), pd.Timestamp),
]
_optional_types_test_cases = [(value, Optional[_type])
for (value, _type) in _raw_types_test_cases]
@pytest.mark.parametrize("value,_type", _raw_types_test_cases)
def test_supported_types(value, _type):
df = pd.DataFrame({'column_1': [value]})
model = PandasToRecordsTransformer(df, 'TestModel')
records = model.from_pandas(df)
expected = {'column_1': _type}
_assert_schema_types(records[0], expected)
@pytest.mark.parametrize("value,_type", _optional_types_test_cases)
def test_supported_optional_types(value, _type):
df = pd.DataFrame({'column_1': [value]})
model = PandasToRecordsTransformer(df, 'TestModel',
optional_fields=['column_1'])
records = model.from_pandas(df)
expected = {'column_1': _type}
_assert_schema_types(records[0], expected)
def test_specification_of_key_fields():
df = | pd.DataFrame({'column_1': [1], 'key': ['key_val']}) | pandas.DataFrame |
import sys
import pandas as pd
import numpy as np
import json
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
    This function loads the messages and categories datasets and merges them on the id column.
    Params:
        messages_filepath (str): String that contains the path to the messages file
        categories_filepath (str): String that contains the path to the categories file
    Returns:
        df (pandas DataFrame): DataFrame with columns: id, message, original, genre, categories
            row: A single message
            columns:
                id --> Id for each message
                message --> Text of the message
                categories --> A single column containing the category marks for the message
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on="id")
return df
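# Minimal usage sketch (file names are assumed, for illustration only):
#
#   df = load_data('messages.csv', 'categories.csv')
#   df = clean_data(df)
#
# after which df holds one row per message with one integer column per category.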
def clean_data(df):
'''
    This function cleans the merged messages and categories data. The cleaning operations are:
        1. Split the single categories column into multiple category columns
        2. Rename the category columns using the first row
        3. Convert the category columns to integer variables
        4. Drop the original categories column and replace it with the multiple category columns
        5. Remove duplicate rows
        6. Remove constant columns
        7. Make the "related" category binary (it has some values that are not 0 or 1)
    Params:
        df (pandas DataFrame): DataFrame on which the cleaning operations are performed.
    Returns:
        df (pandas DataFrame): Cleaned DataFrame.
'''
# create a dataframe of the 36 individual category
categories = df.categories.str.split(';', expand=True)
#Rename the categories columns
row = categories.iloc[0,]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
#Convert columns to ints
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda x: x[-1])
# convert column from string to numeric
categories[column] = categories[column].astype(int)
#Remove original categorie column
df = df.drop(columns=['categories'])
#Insert the categories columns created
df = | pd.concat([df,categories],axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
""" Work with result files from the EIT-40/160 tomograph (also called medusa).
Data structure of .mat files:
EMD(n).fm frequency
EMD(n).Time point of time of this measurement
EMD(n).ni number of the two excitation electrodes (not the channel number)
EMD(n).nu number of the two potential electrodes (not the channel number)
EMD(n).Zt3 array with the transfer impedances (repetition measurement)
EMD(n).nni number of injection
EMD(n).cni number of channels used to inject current
EMD(n).cnu number of channels used to measure voltage
EMD(n).Is3 injected current (A) (calibrated)
EMD(n).II3 leakage current (A)
EMD(n).Yg1 Admitance of first injection path
EMD(n).Yg2 Admitance of second injection path
EMD(n).As3 Voltages at shunt resistors (defined in .mcf files: NA1 - NA2)
EMD(n).Zg3 Impedance between injection electrodes
Import pipeline:
* read single-potentials from .mat file
* read quadrupoles from separate file or provide numpy array
* compute mean of three impedance measurement repetitions (Z1-Z3) for each ABM
* compute quadrupole impedance via superposition using
* a) averaged Z-values
* b) the single repetitions Z1-Z3
* (I think we don't need the next step because of np.arctan2)
check for correct quadrant in phase values, correct if necessary (is this
required if we use the arctan2 function?)
* compute variance/standard deviation from the repetition values
Open questions:
* should we provide a time delta between the two measurements?
"""
print('WARNING: Do not use this module anymore! Use the eit_fzj module!')
import numpy as np
import scipy.io as sio
import pandas as pd
import datetime
import reda.utils.geometric_factors as redaK
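# Illustrative sketch, not part of the original module: how a four-point transfer
# impedance is assembled below by superposition of two MNU0 single potentials that
# share the same current injection (a, b), and how magnitude and phase are taken with
# arctan2 (which already returns the correct quadrant). The helper name is an
# assumption made for illustration only.
def _superposition_sketch(z_abm, z_abn):
    zt = z_abm - z_abn                                       # Z_abmn = Z_abm - Z_abn
    r = np.sign(np.real(zt)) * np.abs(zt)                    # signed resistance, as in import_medusa_data
    rpha_mrad = np.arctan2(np.imag(zt), np.real(zt)) * 1e3   # phase in mrad
    return zt, r, rpha_mrad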
def _add_rhoa(df, spacing):
"""a simple wrapper to compute K factors and add rhoa
"""
df['k'] = redaK.compute_K_analytical(df, spacing=spacing)
df['rho_a'] = df['r'] * df['k']
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k']
return df
def import_medusa_data(mat_filename, config_file):
"""Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
"""
df_emd, df_md = _read_mat_mnu0(mat_filename)
# 'configs' can be a numpy array or a filename
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
# construct four-point measurements via superposition
print('constructing four-point measurements')
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
# print('constructing', Ar, Br, M, N)
# the order of A and B doesn't concern us
A = np.min((Ar, Br))
B = np.max((Ar, Br))
# first choice: correct ordering
query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, M
))
query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, N
))
if query_M.size == 0 or query_N.size == 0:
continue
index += 1
# keep these columns as they are (no subtracting)
keep_cols = [
'datetime',
'frequency',
'a', 'b',
'Zg1', 'Zg2', 'Zg3',
'Is',
'Il',
'Zg',
'Iab',
]
df4 = pd.DataFrame()
diff_cols = ['Zt', ]
df4[keep_cols] = query_M[keep_cols]
for col in diff_cols:
df4[col] = query_M[col].values - query_N[col].values
df4['m'] = query_M['p'].values
df4['n'] = query_N['p'].values
quadpole_list.append(df4)
if quadpole_list:
dfn = pd.concat(quadpole_list)
Rsign = np.sign(dfn['Zt'].real)
dfn['r'] = Rsign * np.abs(dfn['Zt'])
dfn['Vmn'] = dfn['r'] * dfn['Iab']
dfn['rpha'] = np.arctan2(
np.imag(dfn['Zt'].values),
np.real(dfn['Zt'].values)
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md
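# Minimal usage sketch (file names and the electrode spacing are assumed):
#
#   dfn, df_md = import_medusa_data('measurement_mnu0.mat', 'configs_abmn.dat')
#   dfn = _add_rhoa(dfn, spacing=1.0)   # adds geometric factors k and apparent resistivities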
def _read_mat_mnu0(filename):
"""Import a .mat file with single potentials (a b m) into a pandas
DataFrame
Also export some variables of the MD struct into a separate structure
"""
print('read_mag_single_file: {0}'.format(filename))
mat = sio.loadmat(filename, squeeze_me=True)
# check the version
version = mat['MP']['Version'].item()
if version != 'FZJ-EZ-2017':
raise Exception(
'This data format is not supported (expected: FZJ-EZ-2017)' +
' got: {}'.format(version)
)
df_emd = _extract_emd(mat, filename=filename)
df_md = _extract_md(mat)
return df_emd, df_md
def _extract_md(mat):
md = mat['MD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, md.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = md[f_id]
# for name in fdata.dtype.names:
# print(name, fdata[name].shape)
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['cni'],
fdata['Cl3'],
fdata['Zg3'],
fdata['As3'][:, 0, :].squeeze(),
fdata['As3'][:, 1, :].squeeze(),
fdata['As3'][:, 2, :].squeeze(),
fdata['As3'][:, 3, :].squeeze(),
fdata['Is3'],
fdata['Yl3'],
))
)
df.columns = (
'datetime',
'a',
'b',
'Cl1',
'Cl2',
'Cl3',
'Zg1',
'Zg2',
'Zg3',
'ShuntVoltage1_1',
'ShuntVoltage1_2',
'ShuntVoltage1_3',
'ShuntVoltage2_1',
'ShuntVoltage2_2',
'ShuntVoltage2_3',
'ShuntVoltage3_1',
'ShuntVoltage3_2',
'ShuntVoltage3_3',
'ShuntVoltage4_1',
'ShuntVoltage4_2',
'ShuntVoltage4_3',
'Is1',
'Is2',
'Is3',
'Yl1',
'Yl2',
'Yl3',
)
df['datetime'] = | pd.to_datetime(df['datetime']) | pandas.to_datetime |
# coding: utf-8
"""The GBRV results for binary and ternary compunds"""
import os
import json
import numpy as np
from collections import defaultdict, Counter
from monty.io import FileLock
from monty.string import list_strings
from monty.termcolor import cprint
from atomicfile import AtomicFile
from pandas import DataFrame
from monty.collections import dict2namedtuple
from pymatgen.core.periodic_table import Element
from abipy.tools.plotting import add_fig_kwargs, get_ax_fig_plt
from pseudo_dojo.core.pseudos import DojoTable, OfficialDojoTable
from pseudo_dojo.refdata.gbrv.database import gbrv_database, gbrv_code_names, species_from_formula
from pseudo_dojo.pseudos import as_dojo_path
from pseudo_dojo.util.dojo_eos import EOS
import logging
logger = logging.getLogger(__name__)
def sort_symbols_by_Z(symbols):
"""
Given a list of element symbols, sort the strings according to Z,
Return sorted list.
>>> assert sort_symbols_by_Z(["Si", "H"]) == ["H", "Si"]
"""
return list(sorted(symbols, key=lambda s: Element(s).Z))
def print_frame(x):
import pandas as pd
with pd.option_context('display.max_rows', len(x),
'display.max_columns', len(list(x.keys()))):
print(x)
class GbrvRecord(dict):
"""
Example of entry of LiCl:
"rocksalt": {
"LiCl" = {
formula: "LiCl",
pseudos_metadata: {
"Li": {basename: "Li-s-high.psp8", md5: 312},
"Cl": {basename: "Cl.psp8", md5: 562}],
}
normal: results,
high: results,
},
}
where results is the dictionary:
{"ecut": 6,
"v0": 31.72020565768123,
"a0": 5.024952898489712,
"b0": 4.148379951739942,
"num_sites": 2
"etotals": [-593.3490598305451, ...],
"volumes": [31.410526411353832, ...],
}
"""
ACCURACIES = ("normal", "high")
STATUS_LIST = (None, "scheduled" ,"failed")
def __init__(self, struct_type, formula, pseudos_or_dict, dojo_pptable):
"""
Initialize the record for the chemical formula and the list of
pseudopotentials.
"""
keys = ("basename", "Z_val", "l_max", "md5")
#if isinstance(pseudos_or_dict, (list, tuple)):
if all(hasattr(p, "as_dict") for p in pseudos_or_dict):
def get_info(p):
"""Extract the most important info from the pseudo."""
#symbol = p.symbol
d = p.as_dict()
return {k: d[k] for k in keys}
meta = {p.symbol: get_info(p) for p in pseudos_or_dict}
pseudos = pseudos_or_dict
else:
meta = pseudos_or_dict
for v in meta.values():
assert set(v.keys()) == set(keys)
def pmatch(ps, esymb, d):
return (ps.md5 == d["md5"] and
ps.symbol == esymb and
ps.Z_val == d["Z_val"] and
ps.l_max == d["l_max"])
pseudos = []
for esymb, d in meta.items():
for p in dojo_pptable.pseudo_with_symbol(esymb, allow_multi=True):
if pmatch(p, esymb, d):
pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo:\n %s\n in dojo_pptable" % str(d))
super(GbrvRecord, self).__init__(formula=formula, pseudos_metadata=meta,
normal=None, high=None)
self.pseudos = DojoTable.as_table(pseudos)
self.dojo_pptable = dojo_pptable
self.struct_type = struct_type
#def __str__(self):
# return json.dumps(self.as_dict(), indent=4, sort_keys=False)
#@property
#def formula(self):
# return self["formula"]
@add_fig_kwargs
def plot_eos(self, ax=None, accuracy="normal", **kwargs):
"""
Plot the equation of state.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure.
"""
ax, fig, plt = get_ax_fig_plt(ax)
if not self.has_data(accuracy): return fig
d = self["accuracy"]
num_sites, volumes, etotals = d["num_sites"], np.array(d["volumes"]), np.array(d["etotals"])
# Perform quadratic fit.
eos = EOS.Quadratic()
eos_fit = eos.fit(volumes/num_sites, etotals/num_sites)
label = "ecut %.1f" % d["ecut"]
eos_fit.plot(ax=ax, text=False, label=label, show=False) # color=cmap(i/num_ecuts, alpha=1),
return fig
class GbrvOutdb(dict):
"""
Stores the results for the GBRV tests (binary and ternary compounds).
This object is usually created via the class methods:
GbrvOutdb.from_file and GbrvOutdb.new_from_table.
"""
# The structures stored in the database.
struct_types = ["rocksalt", "ABO3", "hH"]
# The name of the json database should start with prefix.
prefix = "gbrv_compounds_"
@classmethod
def new_from_table(cls, table, djson_path):
"""
Initialize the object from an :class:`OfficialDojoTable` and a djson file.
"""
djson_path = os.path.abspath(djson_path)
dirname = os.path.dirname(djson_path)
new = cls(path=os.path.join(dirname, cls.prefix + os.path.basename(djson_path)),
djson_path=djson_path, xc_name=table.xc.name)
# Init subdictionaries e.g. {'ABO3': {'KHgF3': None, 'SbNCa3': None}, "hH": {...}}
# These dictionaries will be filled afterwards with the GBRV results.
gbrv = gbrv_database(xc=table.xc)
for struct_type in cls.struct_types:
new[struct_type] = {k: None for k in gbrv.tables[struct_type]}
# Get a reference to the table.
new.table = table
return new
@classmethod
def from_file(cls, filepath):
"""
        Initialize the object from a file in JSON format.
"""
with open(filepath, "rt") as fh:
d = json.load(fh)
new = cls(**d)
#new["xc"] = new["xc"]
#print("keys", new.keys())
# Construct the full table of pseudos
# Translate djson_path into path insides pseudos
djpath = as_dojo_path(new["djson_path"])
new.table = OfficialDojoTable.from_djson_file(djpath)
return new
def iter_struct_formula_data(self):
"""Iterate over (struct_type, formula, data)."""
for struct_type in self.struct_types:
for formula, data in self[struct_type].items():
yield struct_type, formula, data
@property
def path(self):
return self["path"]
def json_write(self, filepath=None):
"""
Write data to file in JSON format.
If filepath is None, self.path is used.
"""
filepath = self.path if filepath is None else filepath
with open(filepath, "wt") as fh:
json.dump(self, fh, indent=-1, sort_keys=True)
@classmethod
def insert_results(cls, filepath, struct_type, formula, accuracy, pseudos, results):
"""
Update the entry in the database.
"""
with FileLock(filepath):
outdb = cls.from_file(filepath)
old_dict = outdb[struct_type][formula]
if not isinstance(old_dict, dict): old_dict = {}
old_dict[accuracy] = results
outdb[struct_type][formula] = old_dict
with AtomicFile(filepath, mode="wt") as fh:
json.dump(outdb, fh, indent=-1, sort_keys=True) #, cls=MontyEncoder)
def find_jobs_torun(self, max_njobs):
"""
        Find entries whose results have not yet been calculated.
        Args:
            max_njobs: Maximum number of jobs to return.
"""
jobs, got = [], 0
for struct_type, formula, data in self.iter_struct_formula_data():
if got == max_njobs: break
if data in ("scheduled", "failed"): continue
if data is None:
symbols = list(set(species_from_formula(formula)))
pseudos = self.table.pseudos_with_symbols(symbols)
job = dict2namedtuple(formula=formula, struct_type=struct_type, pseudos=pseudos)
self[struct_type][formula] = "scheduled"
jobs.append(job)
got += 1
# Update the database.
if jobs: self.json_write()
return jobs
def get_record(self, struct_type, formula):
"""
Find the record associated to the specified structure type and chemical formula.
Return None if record is not present.
"""
d = self.get(struct_type)
if d is None: return None
data = d.get(formula)
if data is None: return None
raise NotImplementedError()
#return GbrvRecord.from_data(data, struct_type, formula, pseudos)
# TODO
def check_update(self):
"""
        Check consistency between the pseudopotential table and the database and upgrade the database.
        This usually happens when new pseudopotentials have been added to the dojo directory
        (very likely) or when pseudos have been removed (unlikely!).
        Returns: namedtuple with the following attributes:
nrec_removed
nrec_added
"""
nrec_removed, nrec_added = 0, 0
missing = defaultdict(list)
for formula, species in self.gbrv_formula_and_species:
# Get **all** the possible combinations for these species.
comb_list = self.dojo_pptable.all_combinations_for_elements(set(species))
# Check consistency between records and pseudos!
# This is gonna be slow if we have several possibilities!
records = self[formula]
recidx_found = []
for pseudos in comb_list:
for i, rec in enumerate(records):
if rec.matches_pseudos(pseudos):
recidx_found.append(i)
break
else:
missing[formula].append(pseudos)
# Remove stale records (if any)
num_found = len(recidx_found)
if num_found != len(records):
num_stale = len(records) - num_found
print("Found %s stale records" % num_stale)
nrec_removed += num_stale
self[formula] = [records[i] for i in recidx_found]
if missing:
for formula, pplist in missing.items():
for pseudos in pplist:
nrec_removed += 1
self[formula].append(GbrvRecord(self.struct_type, formula, pseudos, self.dojo_pptable))
if missing or nrec_removed:
print("Updating database.")
self.json_write()
return dict2namedtuple(nrec_removed=nrec_removed, nrec_added=nrec_added)
def reset(self, status_list="failed", write=True):
"""
Reset all the records whose status is in `status_list` so that we can resubmit them.
        Return the number of records that have been reset.
"""
status_list = list_strings(status_list)
count = 0
for struct_type, formula, data in self.iter_struct_formula_data():
if data in status_list:
self[struct_type][formula] = None
count += 1
# Update the database.
if count and write: self.json_write()
return count
#############################
### Post-processing tools ###
#############################
def get_pdframe(self, reference="ae", accuracy="normal", pptable=None, **kwargs):
"""
Build and return a :class:`GbrvCompoundDataFrame` with the most important results.
Args:
reference:
            pptable: :class:`PseudoTable` object. If given, the frame will contain only the
                entries with pseudopotentials in pptable.
Returns:
frame: pandas :class:`DataFrame`
"""
#def get_df(p):
# dfact_meV, df_prime = None, None
# if p.has_dojo_report:
# try:
# data = p.dojo_report["deltafactor"]
# high_ecut = list(data.keys())[-1]
# dfact_meV = data[high_ecut]["dfact_meV"]
# df_prime = data[high_ecut]["dfactprime_meV"]
# except KeyError:
# pass
# return dict(dfact_meV=dfact_meV, df_prime=df_prime)
#def get_meta(p):
# """Return dict with pseudo metadata."""
# meta = {"basename": p.basename, "md5": p.md5}
# meta.update(get_df(p))
# return meta
gbrv = gbrv_database(xc=self["xc_name"])
rows, miss = [], 0
for struct_type, formula, data in self.iter_struct_formula_data():
if not isinstance(data, dict):
miss += 1
continue
try:
a0 = data[accuracy]["a0"]
except KeyError:
print("No entry with accuracy %s for %s" % (accuracy, formula))
a0 = None
row = dict(formula=formula, struct_type=struct_type, this=a0,
accuracy=accuracy,
#basenames=set(p.basename for p in rec.pseudos),
#pseudos_meta={p.symbol: get_meta(p) for p in rec.pseudos},
#symbols={p.symbol for p in rec.pseudos},
)
# Add results from the database.
entry = gbrv.get_entry(formula, struct_type)
row.update({code: getattr(entry, code) for code in gbrv_code_names})
#row.update({code: getattr(entry, code) for code in ["ae", "gbrv_paw"]})
rows.append(row)
if miss:
print("There are %d missing entries in %s\n" % (miss, self.path))
# Build sub-class of pandas.DataFrame and add relative error wrt AE results.
frame = GbrvCompoundDataFrame(rows)
frame["rel_err"] = 100 * (frame["this"] - frame["ae"]) / frame["ae"]
return frame
def make_open_notebook(self, nbpath=None, foreground=False):
"""
Generate an ipython notebook and open it in the browser.
Args:
            nbpath: If nbpath is None, a temporary file is created.
foreground: By default, jupyter is executed in background and stdout, stderr are redirected
to devnull. Use foreground to run the process in foreground
Return:
system exit code.
Raise:
RuntimeError if jupyter is not in $PATH
"""
nbpath = self.write_notebook(nbpath=nbpath)
if foreground:
cmd = "jupyter notebook %s" % nbpath
return os.system(cmd)
else:
cmd = "jupyter notebook %s &> /dev/null &" % nbpath
print("Executing:", cmd)
import subprocess
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
process = subprocess.Popen(cmd.split(), shell=False, stdout=DEVNULL) #, stderr=DEVNULL)
cprint("pid: %s" % str(process.pid), "yellow")
def write_notebook(self, nbpath):
"""
        Write an ipython notebook. If `nbpath` is None, a temporary file is created.
Returns:
The path to the ipython notebook.
See also:
http://nbviewer.jupyter.org/github/maxalbert/auto-exec-notebook/blob/master/how-to-programmatically-generate-and-execute-an-ipython-notebook.ipynb
"""
frame = self.get_pdframe()
import nbformat
nbv = nbformat.v4
nb = nbv.new_notebook()
nb.cells.extend([
nbv.new_markdown_cell("# This is an auto-generated notebook"),
nbv.new_code_cell("""\
from __future__ import print_function, division, unicode_literals
from IPython.display import display
import seaborn as sns
import pandas as pd
import pylab
%matplotlib notebook
pd.options.display.float_format = '{:,.3f}'.format
# disable table reduction
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
sns.set(font_scale=1.6)
sns.set_style("whitegrid")
pylab.rcParams['figure.figsize'] = (12.0, 6.0)"""),
nbv.new_code_cell("""\
from pseudo_dojo.dojo.gbrv_outdb import GbrvOutdb
outdb = GbrvOutdb.from_file('%s')
frame = outdb.get_pdframe(accuracy='normal')
display(frame.code_errors())""" % as_dojo_path(self.path)),
nbv.new_code_cell("""\
with open('gbrv_errors.tex', 'w') as f:
f.write(frame.code_errors().to_latex())"""),
nbv.new_code_cell("display(frame)"),
])
for struct_type in frame.struct_types():
nb.cells += [
nbv.new_markdown_cell("## GBRV results for structure %s:" % struct_type),
nbv.new_code_cell("""\
fig = frame.plot_errors_for_structure('%s')
pylab.tight_layout()""" % struct_type),
nbv.new_code_cell("""\
fig = frame.plot_hist('%s')
pylab.tight_layout()""" % struct_type),
nbv.new_code_cell("display(frame.select_bad_guys(reltol=0.35, struct_type='%s'))" % struct_type),
]
nb.cells += [
nbv.new_markdown_cell("## GBRV Compounds: relative errors as function of chemical element"),
nbv.new_code_cell("""\
fig = frame.plot_errors_for_elements()
pylab.tight_layout()"""),
nbv.new_code_cell("fig.savefig('gbrv.elements.eps', bbox_inches='tight')"),
nbv.new_markdown_cell("## Bad guys:"),
nbv.new_code_cell("bad = frame.select_bad_guys(reltol=0.25)"),
nbv.new_code_cell("display(bad.sort_values(by='rel_err'))"),
nbv.new_code_cell("""\
with open('gbrv_compounds_outliers.tex', 'w') as f:
f.write(bad.to_latex())"""),
nbv.new_code_cell("print(bad.symbol_counter)"),
nbv.new_code_cell("""\
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The code cells for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')""")
]
import io, tempfile
if nbpath is None:
_, nbpath = tempfile.mkstemp(suffix='.ipynb', text=True)
with io.open(nbpath, 'wt', encoding="utf8") as f:
nbformat.write(nb, f)
return nbpath
class GbrvCompoundDataFrame(DataFrame):
"""
Extends pandas :class:`DataFrame` by adding helper functions.
The frame has the structure:
ae formula gbrv_paw gbrv_uspp pslib struct_type this vasp
0 4.073 AlN 4.079 4.079 4.068 rocksalt 4.068777 4.070
1 5.161 LiCl 5.153 5.151 5.160 rocksalt 5.150304 5.150
2 4.911 YN 4.909 4.908 4.915 rocksalt 4.906262 4.906
TODO: column with pseudos?
basenames
set(Cs_basename, Cl_basename)
{...}
"""
ALL_ACCURACIES = ("normal", "high")
def struct_types(self):
"""List of structure types available in the dataframe."""
return sorted(set(self["struct_type"]))
def code_names(self):
"""List of code names available in the dataframe."""
codes = sorted(c for c in self.keys() if c in gbrv_code_names)
# Add this
return ["this"] + codes
def code_errors(self, choice="rms", **kwargs):
"""Return frame with the rms of the different codes."""
index, rows = [], []
ref_code = "ae"
for struct_type in self.struct_types():
new = self[self["struct_type"] == struct_type].copy()
index.append(struct_type)
row = {}
for code in self.code_names():
if code == ref_code: continue
values = (100 * (new[code] - new[ref_code]) / new[ref_code]).dropna()
if choice == "rms":
row[code] = np.sqrt((values**2).mean())
elif choice == "mare":
row[code] = values.abs().mean()
else:
raise ValueError("Wrong value of choice: %s" % choice)
rows.append(row)
frame = DataFrame(rows, index=index)
return frame
def select_bad_guys(self, reltol=0.4, struct_type=None):
"""Return new frame with the entries whose relative errors is > reltol."""
new = self[abs(100 * (self["this"] - self["ae"]) / self["ae"]) > reltol].copy()
new["rel_err"] = 100 * (self["this"] - self["ae"]) / self["ae"]
if struct_type is not None:
new = new[new.struct_type == struct_type]
new.__class__ = self.__class__
count = Counter()
for idx, row in new.iterrows():
for symbol in set(species_from_formula(row.formula)):
count[symbol] += 1
new.symbol_counter = count
return new
def remove_bad_guys(self, reltol=0.4):
"""Return new frame in which the entries whose relative errors is > reltol are removed."""
new = self[abs(100 * (self["this"] - self["ae"]) / self["ae"]) <= reltol].copy()
new.__class__ = self.__class__
new["rel_err"] = 100 * (self["this"] - self["ae"]) / self["ae"]
return new
def select_symbol(self, symbol):
"""
Extract the rows whose formula contains the given element symbol.
Return new `GbrvCompoundDataFrame`.
"""
rows = []
for idx, row in self.iterrows():
if symbol not in species_from_formula(row.formula): continue
rows.append(row)
return self.__class__(rows)
@add_fig_kwargs
def plot_errors_for_structure(self, struct_type, ax=None, **kwargs):
"""
Plot the errors for a given crystalline structure.
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
data = self[self["struct_type"] == struct_type].copy()
if not len(data):
print("No results available for struct_type:", struct_type)
return None
colnames = ["this", "gbrv_paw"]
for col in colnames:
data[col + "_rel_err"] = 100 * (data[col] - data["ae"]) / data["ae"]
#data[col + "_rel_err"] = abs(100 * (data[col] - data["ae"]) / data["ae"])
data.plot(x="formula", y=col + "_rel_err", ax=ax, style="o-", grid=True)
labels = data['formula'].values
ax.set_ylabel("relative error %% for %s" % struct_type)
ticks = list(range(len(data.index)))
ticks1 = range(min(ticks), max(ticks)+1, 2)
ticks2 = range(min(ticks)+1, max(ticks)+1, 2)
labels1 = [labels[i] for i in ticks1]
labels2 = [labels[i] for i in ticks2]
#ax.tick_params(which='both', direction='out')
#ax.set_ylim(-1, 1)
ax.set_xticks(ticks1)
ax.set_xticklabels(labels1, rotation=90)
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.set_xticks(ticks2)
ax2.set_xticklabels(labels2, rotation=90)
ax2.set_xlim(ax.get_xlim())
return fig
@add_fig_kwargs
def plot_hist(self, struct_type, ax=None, errtxt=True, **kwargs):
"""
Histogram plot.
"""
#if codes is None: codes = ["ae"]
ax, fig, plt = get_ax_fig_plt(ax)
import seaborn as sns
codes = ["this", "gbrv_paw"] #, "gbrv_uspp", "pslib", "vasp"]
new = self[self["struct_type"] == struct_type].copy()
ypos = 0.8
for i, code in enumerate(codes):
values = (100 * (new[code] - new["ae"]) / new["ae"]).dropna()
sns.distplot(values, ax=ax, rug=True, hist=False, label=code)
# Add text with Mean or (MARE/RMSRE)
if errtxt:
text = []; app = text.append
#app("%s MARE = %.2f" % (code, values.abs().mean()))
app("%s RMSRE = %.2f" % (code, np.sqrt((values**2).mean())))
ax.text(0.6, ypos, "\n".join(text), transform=ax.transAxes)
ypos -= 0.1
ax.grid(True)
ax.set_xlabel("relative error %")
ax.set_xlim(-0.8, 0.8)
return fig
@add_fig_kwargs
def plot_errors_for_elements(self, ax=None, **kwargs):
"""
Plot the relative errors associated with the chemical elements.
"""
dict_list = []
for idx, row in self.iterrows():
rerr = 100 * (row["this"] - row["ae"]) / row["ae"]
for symbol in set(species_from_formula(row.formula)):
dict_list.append(dict(
element=symbol,
rerr=rerr,
formula=row.formula,
struct_type=row.struct_type,
))
frame = | DataFrame(dict_list) | pandas.DataFrame |
from .utils import Atom, Residue, ActiveSite
import pandas as pd
import numpy as np
from scipy import spatial
import collections
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, linkage
# format the active site data into a df with the active sites as
# index and a list of unit vectors for the residues
def format_data(active_sites):
# active sites contain residues, contain atoms, contain coords
# collect the residue, max/min coords for each active site
collect_vector_coords = []
for s in active_sites:
for r in s.residues:
collect_bb = [str(s),r.type]
collect_a = []
for a in r.atoms[:3]: # r.atoms[:3] are N, CA, C
collect_a.append(a.coords)
x_vals = [item[0] for item in collect_a]
y_vals = [item[1] for item in collect_a]
z_vals = [item[2] for item in collect_a]
collect_vals = [max(x_vals),max(y_vals),max(z_vals),min(x_vals),min(y_vals),min(z_vals)]
collect_bb.append(collect_vals)
collect_vector_coords.append(collect_bb)
# collect the active site, residue, and max/min coords into df
pd_backbone = pd.DataFrame(collect_vector_coords,columns=['activesite','aminoacid','mmcoords'])
pd_backbone[['x_max','y_max','z_max','x_min','y_min','z_min']] = pd.DataFrame(pd_backbone.mmcoords.values.tolist(), index= pd_backbone.index)
pd_backbone.drop('mmcoords',inplace=True,axis=1)
# get the distance between the max and min for each residue
pd_backbone['x_dist'] = pd_backbone['x_max']-pd_backbone['x_min']
pd_backbone['y_dist'] = pd_backbone['y_max']-pd_backbone['y_min']
pd_backbone['z_dist'] = pd_backbone['z_max']-pd_backbone['z_min']
pd_backbone.drop(['x_max','x_min','y_max','y_min','z_max','z_min'],inplace=True,axis=1)
# make a vector for the min/max coords
pd_backbone['vector'] = pd_backbone[['x_dist','y_dist','z_dist']].values.tolist()
pd_backbone.drop(['x_dist','y_dist','z_dist'],inplace=True,axis=1)
# convert to unit vector
pd_backbone['unit_v'] = pd_backbone['vector'].apply(lambda x: x/np.linalg.norm(x))
pd_backbone.drop('vector',inplace=True,axis=1)
# list the residues and max/min coords for each active site
group_bb = pd_backbone.groupby(['activesite']).agg(lambda x: list(x))
group_bb.drop(['aminoacid'],inplace=True,axis=1)
# get average of vectors
group_bb['ave'] = group_bb['unit_v'].apply(lambda x: [float(sum(col))/len(col) for col in zip(*x)])
group_ave = group_bb[['ave']]
return group_ave
# make matrix comparing every active site to every other active site
def make_distance_matrix(df_sites):
collect_comp = []
collect_names = []
# compare every active site distance matrix value to every other
for indexone, rowone in df_sites.iterrows():
collect_names.append(indexone)
collect = []
site_a = rowone['ave']
for indextwo, rowtwo in df_sites.iterrows():
site_b = rowtwo['ave']
collect.append(compute_similarity(site_a, site_b))
collect_comp.append(collect)
df_matrix = | pd.DataFrame(collect_comp,columns=collect_names,index=collect_names) | pandas.DataFrame |
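# compute_similarity() used above is defined elsewhere in this module; a
# minimal stand-in that is consistent with the imports at the top (numpy,
# scipy.spatial) and assumes plain Euclidean distance between the two
# averaged unit vectors could look like this hypothetical sketch:
def _compute_similarity_sketch(site_a, site_b):
    # smaller value = more similar pair of active sites
    return spatial.distance.euclidean(site_a, site_b)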
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
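# Quick illustrative sketch (not part of the original suite) of the priority
# order implemented by get_upcast_box above:
def test_get_upcast_box_priority_sketch():
    assert get_upcast_box(pd.Index, Series([1, 2])) is Series
    assert get_upcast_box(Series, DataFrame({"A": [1]})) is DataFrame
    assert get_upcast_box(tm.to_array, np.array([1, 2])) is tm.to_array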
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
train_data = pd.read_csv("./dataset/train.csv", encoding="euc-kr")
train_data["DateTime"] = | pd.to_datetime(train_data.DateTime) | pandas.to_datetime |
#%%
import time
from pathlib import Path
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from graspologic.plot import pairplot
from sparse_decomposition import SparseComponentAnalysis
from sparse_new_basis.plot import savefig, set_theme
#%%
fig_dir = Path("sparse_new_basis/results/try_genes")
def stashfig(name, *args, **kwargs):
savefig(fig_dir, name, *args, **kwargs)
set_theme()
#%%
data_dir = Path("sparse_new_basis/data/BP_Barabasi_Share/ScRNAData")
#%%
# gene expression data
sequencing_loc = data_dir / "Celegans_ScRNA_OnlyLabeledNeurons.csv"
sequencing_df = pd.read_csv(sequencing_loc, skiprows=[1])
currtime = time.time()
sequencing_df = sequencing_df.pivot(index="genes", columns="neurons", values="Count")
sequencing_df = sequencing_df.T.fillna(0)
print(f"{time.time() - currtime} elapsed to load sequencing data")
sequencing_df
# TODO drop the background RNA from table S2 in the paper
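# Tiny illustration of the reshape above (made-up counts, not the real data):
# pivot() turns the long (genes, neurons, Count) table into a genes x neurons
# matrix, and the transpose plus fillna(0) gives one row per neuron with
# missing gene counts treated as zero.
_toy = pd.DataFrame(
    {"genes": [1, 1, 2], "neurons": ["n1", "n2", "n1"], "Count": [5, 3, 7]}
)
_toy_wide = _toy.pivot(index="genes", columns="neurons", values="Count").T.fillna(0)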
#%%
# info about the genes themselves
gene_loc = data_dir / "GSE136049_gene_annotations.csv"
gene_df = pd.read_csv(gene_loc)
gene_df["genes"] = range(1, len(gene_df) + 1)
gene_df = gene_df.set_index("genes")
gene_df = gene_df.loc[sequencing_df.columns] # some gene #s werent used
gene_df
#%%
sequencing_df.rename(columns=gene_df["gene_symbol"], inplace=True)
sequencing_df
#%%
# annotations for the individual genes
annotation_genes = pd.read_csv(data_dir / "annotation_genes.csv")
nt_annotation_genes = annotation_genes.melt(
id_vars=["neuron_class", "neuron_type"],
value_vars=[f"nt_gene_{i}" for i in range(3)],
value_name="gene",
).dropna(axis=0)
nt_annotation_genes = nt_annotation_genes.drop("variable", axis=1)
nt_annotation_genes["gene_type"] = "neurotransmitter"
other_annotation_genes = annotation_genes.melt(
id_vars=["neuron_class", "neuron_type"],
value_vars=[f"gene_{i}" for i in range(12)],
value_name="gene",
).dropna(axis=0)
other_annotation_genes = other_annotation_genes.drop("variable", axis=1)
other_annotation_genes["gene_type"] = "other"
annotation_genes = pd.concat((nt_annotation_genes, other_annotation_genes), axis=0)
annotation_genes
#%%
# metadata for each neuron in the gene expression data
class_map_loc = data_dir / "Labels2_CElegansScRNA_onlyLabeledNeurons.csv"
scrna_meta = | pd.read_csv(class_map_loc) | pandas.read_csv |
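#%%
# Hypothetical next step (not part of the excerpt above): split the
# neuron x gene count matrix and standardize it with the sklearn tools
# imported at the top before fitting any decomposition model.
# test_size and random_state are made-up values.
X = sequencing_df.values
X_train, X_test = train_test_split(X, test_size=0.2, random_state=0)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)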
from datetime import datetime
from datetime import timedelta
import json
import pandas as pd
import logging
logging.info('Started')
# Do I have the right or not, if I have right for how long
# Given a date, for how long I stay in Schengen countries?
def transform_data(ref_date, entries, exits):
logging.info(
'Check if Reference Date is null and default to today if so')
today = datetime.now()
if ref_date == '': # or is None
reference_date = today
else:
reference_date = datetime.strptime(ref_date, '%Y-%m-%d')
logging.info('Control if Reference Date is past date, not interesting')
if reference_date < today:
exit() # need a function generating json response
logging.info('Create reference date/entries/exists dataframe')
df_entries = pd.DataFrame.from_dict(entries)
df_entries['Coef'] = 1
df_exits = pd.DataFrame.from_dict(exits)
df_exits['Coef'] = 0
df_raw_input = df_entries.append(df_exits, ignore_index=True, sort=True)
df_raw_input.columns = ['Date', 'Coef']
df_raw_input['Date'] = pd.to_datetime(df_raw_input['Date'])
data_ref = {'Date': [reference_date], 'Coef': [0]}
df_reference_date = | pd.DataFrame(data=data_ref) | pandas.DataFrame |
"""
Title: Exploratory Data Analysis of Hand Data
Author: <NAME>
Created on Tue Mar 23 16:07:25 2021
"""
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
from pathlib import Path, PureWindowsPath
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import plot_precision_recall_curve
#%%
# Load project data
project_dir = 'C:/Users/kcmma/github/blackjack/blackjack/data/'
filename = 'hand_data.csv'
basic_strategy = 'basic_strategy.csv'
# Get file path
file_path = Path(project_dir + filename)
basic_strategy_path = Path(project_dir + basic_strategy)
# Convert path to Windows format
windows_path = PureWindowsPath(file_path)
windows_path_2 = PureWindowsPath(basic_strategy_path)
# Read csv to dataframe
hand_df = pd.read_csv(windows_path)
basic_strategy_df = | pd.read_csv(windows_path_2) | pandas.read_csv |
"""Preprocessing WSDM Dataset.
Author: DHSong
Last Modified At: 2020.07.07
Preprocessing WSDM Dataset.
"""
import os
from collections import Counter
from tqdm import tqdm
import pandas as pd
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import seaborn as sns
class PreprocessingWorker:
"""Worker for Preprocessing.
Worker for Preprocessing.
Attributes:
train_raw: pandas Dataframe for Train Dataset(train.csv).
test_raw: pandas Dataframe for Train Dataset(test.csv).
sample_submission_raw: pandas Dataframe for Submission Dataset(sample_submission.csv).
songs_raw: pandas Dataframe for Song Dataset(songs.csv).
members_raw: pandas Dataframe for Member Dataset(members.csv).
song_extra_info_raw: pandas Dataframe for Additional Song Dataset(song_extra_info.csv).
"""
def __init__(self, data_dirname='./data', font_path='./static/fonts/D2Coding.ttc'):
"""Inits Dataframe for data in data directory."""
self._matplotlib_setting(font_path)
self.train_raw = pd.read_csv(os.path.join(data_dirname, 'train.csv'))
self.test_raw = pd.read_csv(os.path.join(data_dirname, 'test.csv'))
self.sample_submission_raw = pd.read_csv(os.path.join(data_dirname, 'sample_submission.csv'))
self.songs_raw = pd.read_csv(os.path.join(data_dirname, 'songs.csv'))
self.members_raw = pd.read_csv(os.path.join(data_dirname, 'members.csv'))
self.song_extra_info_raw = pd.read_csv(os.path.join(data_dirname, 'song_extra_info.csv'))
def _matplotlib_setting(self, font_path):
"""set matplotlib fonts and style."""
font_family = fm.FontProperties(fname=font_path).get_name()
plt.rcParams['font.family'] = font_family
plt.rcParams['font.size'] = 14
plt.style.use('seaborn-darkgrid')
def _barplot(self, df, column, horizontal=True):
plt.figure(figsize=(16, 9))
if horizontal:
sns.countplot(y=column, hue='target', data=df, order=df[column].value_counts().index)
else:
sns.countplot(x=column, hue='target', data=df, order=df[column].value_counts().index)
plt.title('{} Distribution by target'.format(column))
plt.legend(loc='upper right')
plt.savefig('./figures/preprocessing-barplot-{}'.format(column))
def preprocess_train_test(self):
"""Preprocess train.csv and test.csv.
preprocess train.csv and test.csv. Select values to be considered.
Args:
Return:
train: Pandas Dataframe. Select values to be considered in train.csv.
test: Pandas Dataframe. Select values to be considered in test.csv.
"""
train = self.train_raw.fillna('<blank>')
test = self.test_raw.fillna('<blank>')
selected_values_by_columns = {
'source_system_tab': [
'<blank>', '<not selected>',
'my library', 'discover', 'search', 'radio'
],
'source_screen_name': [
'<blank>', '<not selected>',
'Local playlist more', 'Online playlist more', 'Radio',
'Album more', 'Search', 'Artist more', 'Discover Feature',
'Discover Chart', 'Others profile more'
],
'source_type': [
'<blank>', '<not selected>',
'local-library', 'online-playlist', 'local-playlist',
'radio', 'album', 'top-hits-for-artist'
]
}
for column, values in selected_values_by_columns.items():
train.loc[~train[column].isin(values), column] = '<not selected>'
test.loc[~test[column].isin(values), column] = '<not selected>'
for column in selected_values_by_columns.keys():
self._barplot(train, column)
return train, test
def preprocess_members(self):
"""Preprocess members.csv.
preprocess members.csv. Select values to be considered.
Args:
Return:
members: Pandas Dataframe. Select values to be considered in members.csv.
"""
# fill all the NA with <blank>.
members = self.members_raw.fillna('<blank>')
# calculate membership days.
members['registration_init_time'] = pd.to_datetime(members.registration_init_time, format='%Y%m%d')
members['expiration_date'] = pd.to_datetime(members.expiration_date, format='%Y%m%d')
members['membership_days'] = (members.expiration_date - members.registration_init_time).dt.days
# binning membership days.
invalid_membership_days = members.membership_days < 0
members.loc[invalid_membership_days, 'membership_days'] = -1
members.loc[invalid_membership_days, 'membership_days_bin'] = '<invalid>'
members.loc[~invalid_membership_days, 'membership_days_bin'] = pd.qcut(members.loc[~invalid_membership_days, 'membership_days'], 3)
# binning bd(age).
invalid_bd = (members.bd < 0) | (members.bd >= 100)
members.loc[invalid_bd, 'bd'] = -1
members.loc[invalid_bd, 'bd_bin'] = '<invalid>'
members.loc[~invalid_bd, 'bd_bin'] = pd.cut(members.loc[~invalid_bd, 'bd'], 5)
selected_values_by_columns = {
'city': [
'<blank>', '<not selected>',
'1', '13', '5', '4', '15', '22'
],
'registered_via': [
'<blank>', '<not selected>',
'4', '7', '9', '3'
]
}
for column, values in selected_values_by_columns.items():
members[column] = members[column].astype('str')
members.loc[~members[column].isin(values), column] = '<not selected>'
members_train = pd.merge(left=members, right=self.train_raw, how='inner')
for column in selected_values_by_columns.keys():
self._barplot(members_train, column)
self._barplot(members_train, 'bd_bin')
self._barplot(members_train, 'gender')
self._barplot(members_train, 'membership_days_bin')
return members
def preprocess_songs(self):
"""Preprocess songs.csv.
preprocess songs.csv. Select values to be considered.
Args:
Return:
songs: Pandas Dataframe. Select values to be considered in songs.csv.
"""
# fill all the NA with <blank>.
songs = self.songs_raw.fillna('<blank>')
# binning song length.
invalid_song_length = songs.song_length < 0
songs.loc[invalid_song_length, 'song_length'] = -1
songs.loc[invalid_song_length, 'song_length_bin'] = '<invalid>'
songs.loc[~invalid_song_length, 'song_length_bin'] = pd.qcut(songs.loc[~invalid_song_length, 'song_length'], 3)
songs['song_length_bin'] = songs.song_length_bin.astype('str')
# select only top genres.
genre_list = list()
for genres in tqdm(songs.genre_ids.str.split('|')):
for genre in genres:
if genre.isdigit():
genre_list.append(genre)
counter = Counter(genre_list)
top_genres = [genre for genre, freq in counter.most_common(5)]
for genre in tqdm(top_genres):
name = 'genre_{}'.format(genre)
values = list()
for genres in songs.genre_ids:
value = 0
if genre in genres.split('|'):
value = 1
values.append(value)
songs[name] = values
selected_values_by_columns = {
'language': [
'<blank>', '<not selected>',
'52', '3', '27', '24', '31', '10'
]
}
songs.loc[songs.language == '<blank>', 'language'] = -1
songs['language'] = songs.language.astype('int')
songs.loc[songs.language == -1, 'language'] = '<blank>'
for column, values in selected_values_by_columns.items():
songs[column] = songs[column].astype('str')
songs.loc[~songs[column].isin(values), column] = '<not selected>'
songs_train = pd.merge(left=songs, right=self.train_raw, how='inner')
for column in selected_values_by_columns.keys():
self._barplot(songs_train, column)
self._barplot(songs_train, 'song_length_bin')
for genre in top_genres:
name = 'genre_{}'.format(genre)
self._barplot(songs_train, name)
return songs
def preprocess_song_extra_info(self):
"""Preprocess songs.csv.
preprocess songs.csv. Select values to be considered.
Arags:
Return:
songs: Pandas Dataframe. Select values to be considered in songs.csv.
"""
def isrc_to_country(isrc):
if isrc != '<blank>':
return isrc[:2]
else:
return isrc
def isrc_to_year(isrc):
if isrc != '<blank>':
year = isrc[5:7]
if int(year) > 18:
return int('19' + year)
else:
return int('20' + year)
else:
return -1
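# e.g. (illustration): isrc "USUM71703692" -> country "US", digits "17" -> 2017;
# two-digit years above 18 are read as 19xx and the rest as 20xx.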
# fill all the NA with <blank>.
song_extra_info = self.song_extra_info_raw.fillna('<blank>')
song_extra_info['country'] = song_extra_info.isrc.apply(lambda x: isrc_to_country(x))
song_extra_info['year'] = song_extra_info.isrc.apply(lambda x: isrc_to_year(x))
blank_year = (song_extra_info.year == -1)
song_extra_info.loc[blank_year, 'year_bin'] = '<blank>'
song_extra_info.loc[~blank_year, 'year_bin'] = pd.qcut(song_extra_info.loc[~blank_year, 'year'], 5)
selected_values_by_columns = {
'country': [
'<blank>',
'US', 'GB', 'DE',
'FR', 'TC', 'JP'
]
}
for column, values in selected_values_by_columns.items():
song_extra_info[column] = song_extra_info[column].astype('str')
song_extra_info.loc[~song_extra_info[column].isin(values), column] = '<not selected>'
song_extra_info_train = pd.merge(left=song_extra_info, right=self.train_raw, how='inner')
for column in selected_values_by_columns.keys():
self._barplot(song_extra_info_train, column)
self._barplot(song_extra_info_train, 'year_bin')
return song_extra_info
if __name__ == '__main__':
worker = PreprocessingWorker(data_dirname='./data', font_path='./static/fonts/D2Coding.ttc')
print('*****Preprocess train.csv and test.csv*****')
train, test = worker.preprocess_train_test()
print('\n*****Preprocess members.csv*****')
members = worker.preprocess_members()
print('\n*****Preprocess songs.csv*****')
songs = worker.preprocess_songs()
print('\n*****Preprocess song_extra_info.csv*****')
song_extra_info = worker.preprocess_song_extra_info()
members = members.drop(columns=['bd', 'registration_init_time', 'expiration_date', 'membership_days'])
songs = songs.drop(columns=['song_length', 'genre_ids', 'artist_name', 'composer', 'lyricist'])
song_extra_info = song_extra_info.drop(columns=['name', 'isrc', 'year'])
merged = pd.merge(left=train, right=members, how='left', on='msno')
merged = pd.merge(left=merged, right=songs, how='left', on='song_id')
merged = pd.merge(left=merged, right=song_extra_info, how='left', on='song_id')
genre_columns = ['genre_465', 'genre_958', 'genre_1609', 'genre_2022', 'genre_2122']
for column in genre_columns:
merged[column] = merged[column].fillna(0)
merged[column] = merged[column].astype('int')
merged = merged.fillna('<blank>')
merged['registered_via'] = merged.registered_via.astype('str')
merged.to_csv('./data/train_merged.csv', index=False)
merged = pd.merge(left=test, right=members, how='left', on='msno')
merged = | pd.merge(left=merged, right=songs, how='left', on='song_id') | pandas.merge |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections.abc import Iterable
from datetime import datetime, timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.detector_consts import (
AnomalyResponse,
ChangePointInterval,
ConfidenceBand,
PercentageChange,
SingleSpike,
)
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class SingleSpikeTest(TestCase):
def test_spike(self) -> None:
spike_time_str = "2020-03-01"
spike_time = datetime.strptime(spike_time_str, "%Y-%m-%d")
spike = SingleSpike(time=spike_time, value=1.0, n_sigma=3.0)
self.assertEqual(spike.time_str, spike_time_str)
class ChangePointIntervalTest(TestCase):
def test_changepoint(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(15)]
current_length = 10
current_seq = [
previous_seq[10] + timedelta(days=x) for x in range(current_length)
]
previous_values = np.random.randn(len(previous_seq))
current_values = np.random.randn(len(current_seq))
# add a very large value to detect spikes
current_values[0] = 100.0
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame({"time": previous_seq, "value": previous_values})
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame({"time": current_seq, "value": current_values})
)
previous_extend = TimeSeriesData(
pd.DataFrame({"time": previous_seq[9:], "value": previous_values[9:]})
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_end`.
self.current_end = current_seq[-1] + timedelta(days=1)
previous_int = ChangePointInterval(self.prev_start, self.prev_end)
previous_int.data = self.previous
# tests whether data is clipped properly to the start and end dates
np.testing.assert_array_equal(previous_values[0:9], previous_int.data)
# test extending the data
# now the data is extended to include the whole sequence
previous_int.end_time = previous_seq[-1] + timedelta(days=1)
previous_int.extend_data(previous_extend)
self.assertEqual(len(previous_int), len(previous_seq))
current_int = ChangePointInterval(self.current_start, self.current_end)
current_int.data = self.current
current_int.previous_interval = previous_int
# check all the properties
self.assertEqual(current_int.start_time, self.current_start)
self.assertEqual(current_int.end_time, self.current_end)
self.assertEqual(
current_int.start_time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
self.assertEqual(
current_int.end_time_str, datetime.strftime(self.current_end, "%Y-%m-%d")
)
self.assertEqual(current_int.mean_val, np.mean(current_values))
self.assertEqual(current_int.variance_val, np.var(current_values))
self.assertEqual(len(current_int), current_length)
self.assertEqual(current_int.previous_interval, previous_int)
# check spike detection
spike_list = current_int.spikes
# pyre-fixme[16]: `List` has no attribute `value`.
self.assertEqual(spike_list[0].value, 100.0)
self.assertEqual(
# pyre-fixme[16]: `List` has no attribute `time_str`.
spike_list[0].time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
def test_multichangepoint(self) -> None:
# test for multivariate time series
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(15)]
current_length = 10
current_seq = [
previous_seq[10] + timedelta(days=x) for x in range(current_length)
]
num_seq = 5
previous_values = [np.random.randn(len(previous_seq)) for _ in range(num_seq)]
current_values = [np.random.randn(len(current_seq)) for _ in range(num_seq)]
# add a very large value to detect spikes
for i in range(num_seq):
current_values[i][0] = 100 * (i + 1)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq},
**{f"value_{i}": previous_values[i] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame(
{
**{"time": current_seq},
**{f"value_{i}": current_values[i] for i in range(num_seq)},
}
)
)
previous_extend = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[9:]},
**{f"value_{i}": previous_values[i][9:] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_end`.
self.current_end = current_seq[-1] + timedelta(days=1)
previous_int = ChangePointInterval(self.prev_start, self.prev_end)
previous_int.data = self.previous
# tests whether data is clipped properly to the start and end dates
for i in range(num_seq):
self.assertEqual(
# pyre-fixme[16]: Optional type has no attribute `__getitem__`.
previous_int.data[:, i].tolist(),
previous_values[i][0:9].tolist(),
)
# test extending the data
# now the data is extended to include the whole sequence except the last point
previous_int.end_time = previous_seq[-1] # + timedelta(days=1)
previous_int.extend_data(previous_extend)
self.assertEqual(len(previous_int) + 1, len(previous_seq))
# let's repeat this except without truncating the final point
previous_int2 = ChangePointInterval(self.prev_start, self.prev_end)
previous_int2.data = self.previous
previous_int2.end_time = previous_seq[-1] + timedelta(days=1)
previous_int2.extend_data(previous_extend)
self.assertEqual(len(previous_int2), len(previous_seq))
# let's extend the date range so it's longer than the data
# this should not change the results
previous_int3 = ChangePointInterval(self.prev_start, self.prev_end)
previous_int3.data = self.previous
previous_int3.end_time = previous_seq[-1] + timedelta(days=2)
previous_int3.extend_data(previous_extend)
self.assertEqual(len(previous_int3), len(previous_seq))
# let's construct the current ChangePointInterval
current_int = ChangePointInterval(self.current_start, self.current_end)
current_int.data = self.current
current_int.previous_interval = previous_int
# check all the properties
self.assertEqual(current_int.start_time, self.current_start)
self.assertEqual(current_int.end_time, self.current_end)
self.assertEqual(current_int.num_series, num_seq)
self.assertEqual(
current_int.start_time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
self.assertEqual(
current_int.end_time_str, datetime.strftime(self.current_end, "%Y-%m-%d")
)
self.assertEqual(
# pyre-fixme[16]: `float` has no attribute `tolist`.
current_int.mean_val.tolist(),
[np.mean(current_values[i]) for i in range(num_seq)],
)
self.assertEqual(
current_int.variance_val.tolist(),
[np.var(current_values[i]) for i in range(num_seq)],
)
self.assertEqual(len(current_int), current_length)
self.assertEqual(current_int.previous_interval, previous_int)
# check spike detection
spike_array = current_int.spikes
self.assertEqual(len(spike_array), num_seq)
for i in range(num_seq):
# pyre-fixme[16]: `SingleSpike` has no attribute `__getitem__`.
self.assertEqual(spike_array[i][0].value, 100 * (i + 1))
self.assertEqual(
spike_array[i][0].time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
class PercentageChangeTest(TestCase):
def test_perc_change(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(30)]
current_length = 31
# offset one to make the new interval start one day after the previous one ends
current_seq = [
previous_seq[-1] + timedelta(days=(x + 1)) for x in range(current_length)
]
previous_values = 1.0 + 0.25 * np.random.randn(len(previous_seq))
current_values = 10.0 + 0.25 * np.random.randn(len(current_seq))
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame({"time": previous_seq, "value": previous_values})
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame({"time": current_seq, "value": current_values})
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_end`.
self.current_end = current_seq[-1]
previous_int = ChangePointInterval(
previous_seq[0], (previous_seq[-1] + timedelta(days=1))
)
previous_int.data = self.previous
current_int = ChangePointInterval(
current_seq[0], (current_seq[-1] + timedelta(days=1))
)
current_int.data = self.current
current_int.previous_interval = previous_int
perc_change_1 = PercentageChange(current=current_int, previous=previous_int)
previous_mean = np.mean(previous_values)
current_mean = np.mean(current_values)
# test the ratios
ratio_val = current_mean / previous_mean
self.assertEqual(perc_change_1.ratio_estimate, ratio_val)
ratio_estimate = perc_change_1.ratio_estimate
assert isinstance(ratio_estimate, float)
self.assertAlmostEqual(ratio_estimate, 10.0, 0)
self.assertEqual(perc_change_1.perc_change, (ratio_val - 1) * 100)
self.assertEqual(perc_change_1.direction, "up")
self.assertEqual(perc_change_1.stat_sig, True)
self.assertTrue(perc_change_1.p_value < 0.05)
self.assertTrue(perc_change_1.score > 1.96)
# test a detector with false stat sig
second_values = 10.005 + 0.25 * np.random.randn(len(previous_seq))
second = TimeSeriesData(
| pd.DataFrame({"time": previous_seq, "value": second_values}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Thank you for visiting my Kernel!
# I have just started with this dataset, which covers house sales in King County, USA. This kernel will be updated from time to time as I learn from many excellent analysts.
#
# * I am not a native English speaker, so please excuse any awkward phrasing.
# ## 1.Read libraries and the dataset
# Read the libraries and the dataset before analysing. In particular, we should pay attention to any strange points in the dataset.
#
# ## 2.Data Cleaning and Visualizations
# I need to check for nulls and duplications, including the strange points mentioned above. We also look at the relation between 'price' (the target) and the other variables through visualizations. We evaluate a first model before feature engineering so we can track progress. Then, as the explanatory variables increase through feature engineering, multicollinearity is detected.
#
# * 2-1.Exploring nulls and duplications into the dataset.
# * 2-2.Visualizing the price
# * 2-3.Model building(1st)
# * 2-4-1. Feature engineering: "date"
# * 2-4-2. Feature engineering: "renovation"
# * 2-4-3. Feature engineering: "zipcode"
# * 2-4-4. New dataset
# * 2-4-5. Detecting multicollinearity
#
# ## 3.Model building and Evaluation
# The model will be built using the train dataset after detecting multicollinearity. In addition, it is evaluated on the correlation^2 between predicted values (y_pred) and actual values (y_test), MSE (mean_squared_error) and MAE (mean_absolute_error).
# ## 1.Read libraries and the dataset
# Analysis will be started by reading the libraries and the datasets.
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
# ## 1-1. Load the dataset
# In[ ]:
df = pd.read_csv("../input/kc_house_data.csv")
df.head()
# In[ ]:
df.tail()
# **The dataset shows that the target is 'price' and that there are 20 other explanatory variables.**
# In[ ]:
print(df.shape)
print('------------------------')
print(df.nunique())
print('------------------------')
print(df.dtypes)
# The dataset's shape is 21,613 rows * 21 columns, composed as described above.
# #### It is found that the number of rows (21,613) and of unique ids (21,436) differ by 176; 'id' is the first column and not one of the explanatory variables. This difference should be caused by some nulls and/or duplications.
# ## 2.Data Cleaning and Visualisation
# ### 2-1.Exploring nulls and duplications into the dataset.
# In[ ]:
df.isnull().sum()
# In[ ]:
df['id'].value_counts()
# In[ ]:
sum((df['id'].value_counts()>=2)*1)
# It becomes clear that the difference is caused by **DUPLICATION**, NOT nulls.
# * Also, the other variables have no nulls that we need to worry about.
# Since the goal is to predict 'price', we show the distribution and fundamental statistics of 'price' and the correlation between 'price' and the other variables except 'id'.
# ### 2-2. Visualizing the price
# First, look at the distribution of price. It may not be directly useful for prediction; however, understanding the target variable is important.
# In[ ]:
plt.hist(df['price'],bins=100)
# In[ ]:
# Seeing the fundamental statistics of price.
df.describe()['price']
# The distribution of price is skewed to the right, and there is a large difference between the minimum and maximum price: more than 100 times!
# * Next, look at the correlation matrix and the scatter plots between "price" and the other variables except 'date'.
# * **'date' needs to be transformed before it can be used as a feature.**
# In[ ]:
df.corr().style.background_gradient().format('{:.2f}')
# In[ ]:
for i in df.columns:
if (i != 'price') & (i != 'date'):
df[[i,'price']].plot(kind='scatter',x=i,y='price')
# Though the dtypes of 'yr_renovated' and 'zipcode' are int64, they may need feature engineering: the scatter plots above show that 'yr_renovated' is concentrated around 0 and 2000, and 'zipcode' is just a number standing in for a category.
# ### 2-3. Model Building (1st)
# * Try to build the 1st model, where the target is 'price' and X consists of the other variables except 'id', 'date', 'yr_renovated' and 'zipcode'.
# In[ ]:
from sklearn.linear_model import LinearRegression
X = df.drop(['price','id','date','yr_renovated','zipcode'],axis=1)
y = df['price']
regr = LinearRegression(fit_intercept=True).fit(X,y)
print("model_1_score:{:.4f}".format(regr.score(X,y)))
# ### 2-4-1. Feature engineering: "date"
# First, as mentioned, 'date' will be feature engineered to become significant, because 'price' may be related to the day of week ('dow') and the month.
# In[ ]:
df.date.head()
# In[ ]:
pd.to_datetime(df.date).map(lambda x:'dow'+str(x.weekday())).head()
# ** dow:day of week, 0=Monday, 7=Sunday
# In[ ]:
pd.to_datetime(df.date).map(lambda x:'month'+str(x.month)).head()
# ** month1=January, 12=December
# In[ ]:
df['dow'] = pd.to_datetime(df.date).map(lambda x:'dow'+str(x.weekday()))
df['month'] = pd.to_datetime(df.date).map(lambda x:'month'+str(x.month))
# > Next, since the values of 'dow' and 'month' are categorical, they are converted to one-hot encodings.
# In[ ]:
pd.get_dummies(df['dow']).head()
# In[ ]:
| pd.get_dummies(df['month']) | pandas.get_dummies |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
class readWeather:
def __init__(self, filename):
self.filename = filename
def generatePlot(self, requestedDate):
self.store = pd.HDFStore(self.filename)
temp = {}
humid = {}
fromTime = requestedDate + " 00:00:01"
toTime = requestedDate + " 23:59:59"
where = 'index > fromTime & index < toTime'
self.dataRequested = self.store.select('Weather1',where = where, columns = ['Temp'])
self.dataRequested1 = self.store.select('Weather1',where = where, columns = ['Humidity'])
self.index1 = self.dataRequested.index.values.tolist()
self.index_time = [pd.to_datetime(dt).strftime("%H:%M") for dt in self.index1]
self.index_time = list( self.index_time[i] for i in np.arange(0,len(self.index1),28))
temp['30min'] = pd.rolling_mean(self.dataRequested['Temp'], window=6)
temp['5min'] = self.dataRequested['Temp']
humid['30min'] = pd.rolling_mean(self.dataRequested1['Humidity'], window=6)
humid['5min'] = self.dataRequested1['Humidity']
x = np.arange(len(self.index1))
x1 = np.arange(0,len(self.index1),28)
plt.xkcd()
fig, [ax_temp, ax_hum] = plt.subplots(2,1, figsize=(8,7))
fig.suptitle('Temperature, Humidity on '+ requestedDate, fontsize = 15)
ax_temp.plot(x, temp['30min'], label = '30min_movAvg')
ax_temp.plot(x, temp['5min'], 'r--', label = '5min_interval')
#ax_temp.set_title('Temperature on '+ requestedDate)
ax_temp.legend(loc='best')
#ax_temp.set_xlabel("Time", fontsize = 12)
ax_temp.set_ylabel("Temp (C)", fontsize = 12)
#plt.xticks(x1, self.index_time, rotation = 45)
#ax_temp.tick_params(labelsize=10)
ax_hum.plot(x, humid['30min'], label = '30min_movAvg')
ax_hum.plot(x, humid['5min'], 'r--', label = '5min_interval')
#ax_hum.set_title('Humidity on '+ requestedDate)
ax_hum.legend(loc='best')
ax_hum.set_xlabel("Time", fontsize = 12)
ax_hum.set_ylabel("Humidity (%)", fontsize = 12)
plt.xticks(x1, self.index_time, rotation = 45)
ax_hum.tick_params(labelsize=10)
#make x-axis ticks invisible for temp
plt.setp(ax_temp.get_xticklabels(), visible=False)
#plt.xticks(x1, self.index_time, rotation = 45)
plt.savefig('static/temp_humid_' + requestedDate + '.png')
self.store.close()
def generateTempPlot(self, requestedDate):
self.store = | pd.HDFStore(self.filename) | pandas.HDFStore |
import glob
import math
import os
import pickle
import shutil
import time
from typing import List, Dict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io
from PIL import Image, ImageDraw
N = 5
ERROR_THRESHOLD = 5.0
MASK_COORDS = [(225, 360), (335, 130)]
PATH_TO_SCENES = "images"
ANALYZE_DATA = False
PATH_TO_HIGHLIGHTS = "large_variations"
NUM_HIGHLIGHTS = 10
FOLD_VARIATIONS = 0
NUM_FOLDS = 0
def mask_ground_truth(path_to_frame: str):
img = Image.open(path_to_frame).convert('RGB')
mask = img.copy()
draw = ImageDraw.Draw(mask)
draw.rectangle(MASK_COORDS, fill="black")
return Image.blend(img, mask, 1)
def process_sequence(frame_idx: int, scene_paths: List, path_to_seq: str, images_gt: Dict):
ground_truths, errors = [], []
for i in range(N):
path_to_frame = scene_paths[frame_idx - i]
print("\t * Preceding frame {}: {}".format(str(abs(i - N)), path_to_frame))
masked_frame = mask_ground_truth(path_to_frame)
masked_frame.save(os.path.join(path_to_seq, str(abs(i - N)) + ".jpg"))
current_gt = images_gt[path_to_frame]
ground_truths = [current_gt] + ground_truths
if i < N - 1:
preceding_gt = images_gt[scene_paths[frame_idx - i - 1]]
error = angular_error(current_gt, preceding_gt)
errors.insert(0, angular_error(current_gt, preceding_gt))
if error >= ERROR_THRESHOLD:
print("\n\t -> Detected angle change {:.2f} >= {:.2f} between frames {} and {} \n"
.format(error, ERROR_THRESHOLD, frame_idx - i, frame_idx - i - 1))
mean_error, std_error = np.mean(errors), np.std(errors)
plt.plot(range(1, N), errors)
plt.title("AVG: {:.4f} - STD DEV: {:.4f}".format(mean_error, std_error))
plt.xticks(range(1, N + 1))
plt.xlabel("Frame Index")
plt.ylabel("Angular Error w.r.t. Preceding Frame")
plt.savefig(os.path.join(path_to_seq, "color_trend.png"), bbox_inches='tight', dpi=200)
plt.clf()
return ground_truths, mean_error, std_error
def angular_error(f1: np.ndarray, f2: np.ndarray) -> float:
return np.arccos(np.dot(f1, f2) / (np.linalg.norm(f1) * np.linalg.norm(f2))) * (180 / math.pi)
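# e.g. angular_error(np.array([1, 0, 0]), np.array([0, 1, 0])) == 90.0 (orthogonal illuminants),
# while angular_error(f, f) == 0.0 for any non-zero illuminant f.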
def main():
images = np.array([x.strip() for x in open("images_gt_order.lst").readlines()])
ground_truths = scipy.io.loadmat("groundtruth.mat")["real_rgb"].tolist()
pd.DataFrame({"image": images, "ground_truth": ground_truths}).to_csv("images_gt.csv", index=False)
images_gt = {img: gt for img, gt in zip(images, ground_truths)}
path_to_sequences = os.path.join("raw", "{}f_seqs".format(N))
os.makedirs(path_to_sequences, exist_ok=True)
print("\n--------------------------------------------------------------------------------------------\n")
print("\tPreprocessing SFU Gray Ball for N = {}".format(N))
print("\n--------------------------------------------------------------------------------------------\n")
num_sequences, variations, test_scenes = 0, [], []
for scene_name in os.listdir(PATH_TO_SCENES):
print("\n *** Processing scene {} ***".format(scene_name))
scene_paths = sorted(glob.glob(os.path.join(PATH_TO_SCENES, scene_name, "*.jpg")))
print(scene_paths)
for frame_idx, path_to_file in enumerate(scene_paths):
if frame_idx < N - 1:
continue
path_to_seq = os.path.join(path_to_sequences, scene_name, str(frame_idx))
os.makedirs(path_to_seq, exist_ok=True)
print("\n Processing seq {} - file {}".format(path_to_seq, path_to_file))
ground_truths, mean_var, std_var = process_sequence(frame_idx, scene_paths, path_to_seq, images_gt)
variations.append((scene_name, frame_idx, path_to_file.split(os.sep)[-1], mean_var, std_var))
pickle.dump(ground_truths, open(os.path.join(path_to_seq, 'seq_ground_truth.pkl'), "wb"))
gt = np.array(images_gt[path_to_file])
np.savetxt(os.path.join(path_to_seq, 'ground_truth.txt'), gt, delimiter=',')
num_sequences += 1
print("\n--------------------------------------------------------------------------------------------\n")
print("\t Generated {} sequences of length N = {} at {}".format(num_sequences, N, path_to_sequences))
print("\n--------------------------------------------------------------------------------------------\n")
if ANALYZE_DATA:
path_to_save = "{}_{}".format(PATH_TO_HIGHLIGHTS, time.time())
os.makedirs(path_to_save)
s, f, fn, mv, sdv = zip(*variations)
path_to_csv = os.path.join(path_to_save, "data.csv")
| pd.DataFrame({"scene": s, "frame": f, "file_name": fn, "mean_var": mv, "std_dev_var": sdv}) | pandas.DataFrame |
"""
Skfold Test
=============
Example
"""
# General
import numpy as np
import pandas as pd
# Specific
from sklearn.model_selection import StratifiedKFold
# ---------------------------------------------------
#
# ---------------------------------------------------
def repeated_splits(X, y, n_loops=2, n_splits=5):
"""This method...
"""
# Record for comparison
records = []
# Split
for i in range(n_loops):
# Create dataframe
dataframe = pd.DataFrame()
# Create splitter
skf = StratifiedKFold(n_splits=n_splits)
# Loop
for j, (train, test) in enumerate(skf.split(X, y)):
dataframe['fold_{0}'.format(j)] = \
np.concatenate((train, test))
# Append
records.append(dataframe)
# Return
return records
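# Each element of `records` is a DataFrame with one column per fold (fold_0 ... fold_{n_splits-1}),
# holding the concatenated train indices followed by the test indices of that fold.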
# ---------------------------------------------------
# Artificial example
# ---------------------------------------------------
# Size
n = 20
n_splits = 5
n_loops = 5
# Create dataset
X = np.arange(n).reshape(-1,1)
y = np.vstack((np.ones((10,1)),
np.zeros((10,1))))
# Create splits
records = repeated_splits(X, y, n_loops=n_loops,
n_splits=n_splits)
# Compare if all records are equal
for i in range(len(records)-1):
print('{0} == {1} : {2}'.format(i, i+1, \
records[i].equals(records[i+1])))
# ---------------------------------------------------
# Real example
# ---------------------------------------------------
# Read dataset
dataset = | pd.read_csv('dataset.csv') | pandas.read_csv |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import responses
import blueskyapi
@pytest.fixture()
def client():
return blueskyapi.Client()
default_result = [{"forecast_moment": "2021-12-27T18:00:00Z"}]
def add_api_response(path, *, result=default_result, status=200, api_key=None):
matchers = []
if api_key is not None:
matchers.append(
responses.matchers.header_matcher({"Authorization": f"Bearer {api_key}"})
)
responses.add(
responses.GET,
blueskyapi.default_config.base_url + path,
json=result,
status=status,
match=matchers,
)
def describe_init():
def test_defaults(mocker):
mocker.patch.multiple(
"blueskyapi.default_config",
base_url="the base url",
api_key="the api key",
)
client = blueskyapi.Client()
assert client.base_url == "the base url"
assert client.api_key == "the api key"
def test_with_args():
client = blueskyapi.Client("api-key", base_url="https://example.com/api")
assert client.base_url == "https://example.com/api"
assert client.api_key == "api-key"
def describe_with_api_key():
@responses.activate
def when_valid():
client = blueskyapi.Client(api_key="the-key")
add_api_response("/forecasts/latest?lat=53.5&lon=13.5", api_key="the-key")
client.latest_forecast(53.5, 13.5)
@responses.activate
def when_invalid():
client = blueskyapi.Client(api_key="the-key")
add_api_response(
"/forecasts/latest?lat=53.5&lon=13.5",
api_key="the-key",
result={"detail": "Invalid API key"},
status=401,
)
with pytest.raises(blueskyapi.errors.InvalidApiKey, match="401"):
client.latest_forecast(53.5, 13.5)
def describe_latest_forecast():
@responses.activate
def test_defaults(client):
add_api_response("/forecasts/latest?lat=53.5&lon=13.5")
client.latest_forecast(53.5, 13.5)
def describe_forecast_distances():
@responses.activate
def with_array(client):
add_api_response(
"/forecasts/latest?lat=53.5&lon=13.5&forecast_distances=0,24"
)
client.latest_forecast(53.5, 13.5, forecast_distances=[0, 24])
@responses.activate
def with_string(client):
add_api_response(
"/forecasts/latest?lat=53.5&lon=13.5&forecast_distances=0,24"
)
client.latest_forecast(53.5, 13.5, forecast_distances="0,24")
@responses.activate
def with_invalid_value(client):
with pytest.raises(TypeError, match="forecast_distances should be"):
client.latest_forecast(53.5, 13.5, forecast_distances=1.5)
def describe_columns():
@responses.activate
def with_array(client):
add_api_response("/forecasts/latest?lat=53.5&lon=13.5&columns=col_a,col_b")
client.latest_forecast(53.5, 13.5, columns=["col_a", "col_b"])
@responses.activate
def with_string(client):
add_api_response("/forecasts/latest?lat=53.5&lon=13.5&columns=col_a,col_b")
client.latest_forecast(53.5, 13.5, columns="col_a,col_b")
@responses.activate
def with_invalid_value(client):
with pytest.raises(TypeError, match="columns should be"):
client.latest_forecast(53.5, 13.5, columns=1)
@responses.activate
def test_over_rate_limit(client):
add_api_response(
"/forecasts/latest?lat=53.5&lon=13.5",
result={"the": "error"},
status=429,
)
with pytest.raises(blueskyapi.errors.OverRateLimit, match="429"):
client.latest_forecast(53.5, 13.5)
@responses.activate
def test_result(client):
add_api_response(
"/forecasts/latest?lat=53.5&lon=13.5",
result=[{"forecast_moment": "2021-12-27T18:00:00Z", "some_column": 5}],
)
result = client.latest_forecast(53.5, 13.5)
assert np.all(
result.forecast_moment == [pd.to_datetime("2021-12-27T18:00:00Z")]
)
assert np.all(result.some_column == [5])
@pytest.mark.vcr()
def test_integration(client):
result = client.latest_forecast(53.5, 13.5)
assert len(result.columns) == 35
assert len(result) == 15
assert str(result.forecast_moment.dtype) == "datetime64[ns, UTC]"
assert np.all(result.forecast_moment == | pd.to_datetime("2022-03-04T00:00:00Z") | pandas.to_datetime |
import pickle
from os import rename, remove
from os.path import join, dirname, realpath, isfile
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import ModelCheckpoint
from keras.layers import BatchNormalization, Dropout
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.models import load_model
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
import datasets
from logger_config import logger
MODEL_FILENAME = "model.h5"
SCALER_FILENAME = "scaler.dump"
FRAME_LENGTH = 10
FRAME_COLUMNS = 5
N_FEATURES = 5
PRICE_DELTA = 200
MAX_MODELS_COUNT = 3
LABELS = [
0, # short
1, # long
2 # others
]
LABELS_TITLES = [
"DOWN",
"UP",
"NOTHING"
]
DROP_COLUMNS = [
'time'
]
def copy_sub_frame(start, end, src, dest):
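# Append src[start:end] to dest as one numpy array, i.e. a single sliding-window frame.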
line = []
for j in range(start, end):
line.append(src[j])
line = np.asarray(line)
dest.append(line)
def get_labels(label, shape):
arr = np.zeros(shape)
for n in range(len(arr)):
arr[n][label] = 1
return arr
def create_scaler_filename(instrument, period):
dir_path = join(dirname(realpath(__file__)), datasets.DATASET_DIR)
return dir_path + "/" + str(instrument) + "_" + str(period) + "_" + SCALER_FILENAME
def create_model_filename(instrument, period, temp=False, index=None):
dir_path = join(dirname(realpath(__file__)), datasets.DATASET_DIR)
index_str = "" if index is None else (str(index) + "_")
if temp:
return dir_path + "/TEMP_" + str(instrument) + "_" + str(period) + "_" + index_str + MODEL_FILENAME
else:
return dir_path + "/" + str(instrument) + "_" + str(period) + "_" + index_str + MODEL_FILENAME
# https://www.kaggle.com/guglielmocamporese/macro-f1-score-keras
def f1(y_true, y_predict):
y_pred_rnd = K.round(y_predict)
tp = K.sum(K.cast(y_true * y_pred_rnd, 'float'), axis=0)
# tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1 - y_true) * y_pred_rnd, 'float'), axis=0)
fn = K.sum(K.cast(y_true * (1 - y_pred_rnd), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1_res = 2 * p * r / (p + r + K.epsilon())
f1_res = tf.where(tf.is_nan(f1_res), tf.zeros_like(f1_res), f1_res)
return K.mean(f1_res)
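# Rough sanity check (assumes an active Keras/TF session; values are illustrative):
#   y_true = K.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
#   y_pred = K.constant([[.9, .1, 0.], [.2, .8, 0.], [.1, .1, .8]])
#   K.eval(f1(y_true, y_pred))  # ~1.0, since rounding the predictions recovers every label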
def build_classifier(input_shape):
inp = Input(shape=input_shape)
filters = [(1, N_FEATURES - 1), (FRAME_LENGTH, 1), (1, 2), (1, 3), (3, 3), (2, 2), (2, 3), (3, 4), (3, 5)]
conv_layers = []
for f in filters:
conv = Conv2D(32, f, kernel_initializer='he_normal', activation='relu', padding='valid')(inp)
conv = BatchNormalization()(conv)
conv = Flatten()(conv)
conv_layers.append(conv)
x = Concatenate(axis=1)(conv_layers)
x = Dense(units=256, kernel_initializer='he_normal', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(units=256, kernel_initializer='he_normal', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(units=256, kernel_initializer='he_normal', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(units=256, kernel_initializer='he_normal', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
outp = Dense(units=3, kernel_initializer='glorot_normal', activation='sigmoid')(x)
model = Model(inputs=inp, outputs=outp)
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=[f1])
return model
def create_features(data, point):
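# Feature sketch (OHLCV column order open/high/low/close/volume is assumed after dropping 'time'):
# 0: candle body (open - close) in points, 1: high - low range in points, 2: squared body,
# 3: volume as a percentage of the maximum volume, 4: running trend counter of close-to-close moves.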
new_data = np.zeros((len(data), N_FEATURES), dtype='float64')
max_volume = data.iloc[:, [4]].values.max()
trend_counter = 0
for i in range(len(data)):
new_data[i][0] = (data.iloc[i][0] - data.iloc[i][3]) / point
new_data[i][1] = (data.iloc[i][1] - data.iloc[i][2]) / point
new_data[i][2] = pow(new_data[i][0], 2)
new_data[i][3] = (data.iloc[i][4] / max_volume) * 100
if i > 0:
if (data.iloc[i][3] / point) - (data.iloc[i - 1][3] / point) > PRICE_DELTA * 0.05:
trend_counter += 1
elif (data.iloc[i - 1][3] / point) - (data.iloc[i][3] / point) > PRICE_DELTA * 0.05:
trend_counter -= 1
new_data[i][4] = trend_counter
return new_data
def read_data(files, instrument, period, point, need_scale=True):
data_frames = []
for file in files:
data_frames.append(pd.read_csv(file))
data = pd.concat(data_frames, axis=0)
data = data.drop(columns=DROP_COLUMNS)
scaler = StandardScaler()
if need_scale:
scaler.fit(create_features(data, point))
frames_short = []
frames_long = []
frames_other = []
for i in range(FRAME_LENGTH + 2, len(data)):
close0 = round(data.iloc[i - 3][3] / point)
close1 = round(data.iloc[i - 2][3] / point)
close2 = round(data.iloc[i - 1][3] / point)
close3 = round(data.iloc[i][3] / point)
frame = []
for j in range(i - FRAME_LENGTH - 2, i - 2):
frame.append(data.iloc[j])
frame = np.asarray(frame)
if need_scale:
scaled_data = scaler.transform(create_features(pd.DataFrame(frame), point))
else:
scaled_data = create_features( | pd.DataFrame(frame) | pandas.DataFrame |
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
###################### Related to data analysis ############################
###########################################################################
class TestSGAnalysis(object):
# tests find_ir_genes, find_es_genes, get_die_genes, get_die_gene_table, test_gene
# TODO - add update_ids() call before running both find_ir/es_genes to make sure
# the subgraph shit works -
# TODO - check to make sure this works with + AND - strands because the
# loc_df locations no longer have strand associated with them from get_ordered_id_map!!!!
# test get_die_gene_table - gene that meets rc
def test_get_die_table_6(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, .5, .5, 20, 10, 10],
[2, 1, .5, .5, 20, 10, 10]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=15)
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts', 'dpi']
data = [[1, 1, .5, .5, 20, 10, 10, 0],
[2, 1, .5, .5, 20, 10, 10, 0]]
ctrl = | pd.DataFrame(data=data, columns=columns) | pandas.DataFrame |
import json
import os
import pickle
import sys
import tempfile
import uuid
from datetime import timedelta
from time import gmtime, strftime
from typing import Tuple
from zipfile import ZipFile
import requests
from dagster import (
Any,
AssetMaterialization,
DagsterType,
EventMetadataEntry,
Field,
InputDefinition,
Output,
OutputDefinition,
check,
composite_solid,
solid,
)
from dagster.utils import PICKLE_PROTOCOL
from dagster_examples.bay_bikes.constants import (
DARK_SKY_BASE_URL,
FEATURE_COLUMNS,
LABEL_COLUMN,
WEATHER_COLUMNS,
)
from dagster_examples.bay_bikes.types import (
RawTripDataFrame,
TrafficDataFrame,
TrainingSet,
TripDataFrame,
WeatherDataFrame,
)
from dagster_pandas import DataFrame as DagsterPandasDataFrame
from numpy import array, ndarray, transpose
from pandas import (
DataFrame,
date_range,
get_dummies,
notnull,
read_csv,
read_sql_table,
to_datetime,
)
if sys.version_info >= (3, 6):
# this is done because a compatibility issue with json_normalize in python 3.5
from pandas import json_normalize # pylint:disable=import-error,no-name-in-module
else:
from pandas.io.json import json_normalize # pylint:disable=import-error,no-name-in-module
# Added this to silence tensorflow logs. They are insanely verbose.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def _write_chunks_to_fp(response, output_fp, chunk_size):
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
output_fp.write(chunk)
def _download_zipfile_from_url(url: str, target: str, chunk_size=8192) -> str:
with requests.get(url, stream=True) as response, open(target, "wb+") as output_fp:
response.raise_for_status()
_write_chunks_to_fp(response, output_fp, chunk_size)
return target
@solid(
input_defs=[InputDefinition("file_name", str), InputDefinition("base_url", str)],
output_defs=[OutputDefinition(str)],
config_schema={"chunk_size": Field(int, is_required=False, default_value=8192)},
required_resource_keys={"volume"},
)
def download_zipfile_from_url(context, file_name: str, base_url: str):
url = "/".join([base_url, file_name])
# mount dirs onto volume
target = os.path.join(context.resources.volume, file_name)
if not os.path.exists(target):
_download_zipfile_from_url(
url, target, context.solid_config["chunk_size"],
)
yield AssetMaterialization(
asset_key=file_name,
metadata_entries=[
EventMetadataEntry.text(url, "zipfile url source"),
EventMetadataEntry.text(target, "zipfile filepath"),
EventMetadataEntry.text(str(os.path.getsize(target)), "size of zipfile (bytes)"),
],
)
yield Output(target)
@solid(
input_defs=[
InputDefinition("input_file_path", str),
InputDefinition("target_csv_file_in_archive", str),
],
output_defs=[OutputDefinition(DagsterPandasDataFrame)],
config_schema={
"delimiter": Field(
str,
default_value=",",
is_required=False,
description=("A one-character string used to separate fields."),
),
"compression": Field(str, default_value="infer", is_required=False,),
},
required_resource_keys={"volume"},
)
def load_compressed_csv_file(
context, input_file_path: str, target_csv_file_in_archive: str
) -> DataFrame:
# There must be a header in all of these dataframes or it becomes hard to load them into a table or concat dataframes.
dataset = read_csv(
ZipFile(input_file_path).open(target_csv_file_in_archive),
sep=context.solid_config["delimiter"],
header=0,
index_col=0,
compression=context.solid_config["compression"],
)
return dataset
@solid(required_resource_keys={"gcs_client"})
def upload_pickled_object_to_gcs_bucket(context, value: Any, bucket_name: str, file_name: str):
gcs_bucket = context.resources.gcs_client.get_bucket(bucket_name)
key = "{}-{}".format(file_name, uuid.uuid4())
with tempfile.TemporaryFile("w+b") as fp:
pickle.dump(value, fp, PICKLE_PROTOCOL)
# Done because you can't upload the contents of a file outside the context manager if it's a tempfile.
fp.seek(0)
gcs_bucket.blob(key).upload_from_file(fp)
gcs_url = "gs://{bucket_name}/{key}".format(bucket_name=bucket_name, key=key)
yield AssetMaterialization(
asset_key=gcs_url,
description="Serialized object to Google Cloud Storage Bucket",
metadata_entries=[EventMetadataEntry.text(gcs_url, "google cloud storage URI"),],
)
yield Output(value)
def _create_and_load_staging_table(engine, table_name, records):
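# Sketch of the resulting table: id SERIAL PRIMARY KEY plus a staging_data JSON column,
# one JSON-encoded record per dataframe row (nulls are replaced with None before encoding).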
create_table_sql = "create table if not exists {table_name} (id serial not null primary key, staging_data json not null);".format(
table_name=table_name
)
engine.execute(create_table_sql)
records = records.where(notnull(records), None)
insert_sql = "insert into {table_name} (staging_data) values {data};".format(
table_name=table_name,
data=",".join(
[
"('{record}')".format(record=json.dumps(record).replace("'", ""))
for record in records.to_dict("records")
]
),
)
engine.execute(insert_sql)
@solid(
input_defs=[
InputDefinition("records", DagsterPandasDataFrame),
InputDefinition("table_name", str),
],
output_defs=[OutputDefinition(str, name="staging_table")],
required_resource_keys={"postgres_db"},
)
def insert_into_staging_table(context, records: DataFrame, table_name: str):
_create_and_load_staging_table(context.resources.postgres_db.engine, table_name, records)
yield AssetMaterialization(
asset_key=table_name,
description="Table {} created in database {}".format(
table_name, context.resources.postgres_db.db_name
),
metadata_entries=[EventMetadataEntry.text(str(len(records)), "num rows inserted")],
)
yield Output(output_name="staging_table", value=table_name)
def create_download_table_as_dataframe_solid(name, expected_dagster_pandas_dataframe_type):
check.str_param(name, "name")
check.inst_param(
expected_dagster_pandas_dataframe_type,
"expected_dagster_pandas_dataframe_schema",
DagsterType,
)
@solid(
input_defs=[InputDefinition("table_name", str)],
output_defs=[OutputDefinition(expected_dagster_pandas_dataframe_type)],
config_schema={"subsets": Field([str], is_required=False)},
required_resource_keys={"postgres_db"},
name=name,
)
def download_table_as_dataframe(context, table_name: str) -> DataFrame:
dataframe = read_sql_table(table_name, context.resources.postgres_db.engine,)
# flatten dataframe
dataframe = json_normalize(dataframe.to_dict("records"))
dataframe.columns = [column.split(".")[-1] for column in dataframe.columns]
# De-Duplicate Table
subsets = context.solid_config.get("subsets", None)
return dataframe.drop_duplicates(subset=subsets if subsets else None)
return download_table_as_dataframe
@solid(
input_defs=[InputDefinition("epoch_date", int)],
output_defs=[OutputDefinition(DagsterPandasDataFrame)],
config_schema={
"latitude": Field(
float,
default_value=37.8267,
is_required=False,
description=("Latitude coordinate to get weather data about. Default is SF."),
),
"longitude": Field(
float,
default_value=-122.4233,
is_required=False,
description=("Longitude coordinate to get weather data about. Default is SF."),
),
"times_to_exclude": Field(
[str],
default_value=["currently", "minutely", "hourly", "alerts", "flags"],
is_required=False,
description="data granularities to exclude when making this api call",
),
},
required_resource_keys={"credentials_vault"},
)
def download_weather_report_from_weather_api(context, epoch_date: int) -> DataFrame:
# Make API Call
coordinates = "{0},{1}".format(
context.solid_config["latitude"], context.solid_config["longitude"]
)
weather_api_key = context.resources.credentials_vault.credentials["DARK_SKY_API_KEY"]
url_prefix = "/".join([DARK_SKY_BASE_URL, weather_api_key, coordinates])
url = url_prefix + ",{}?exclude={}".format(
epoch_date, ",".join(context.solid_config["times_to_exclude"])
)
context.log.info("Sending Request. URL is: {}".format(url))
response = requests.get(url)
response.raise_for_status()
raw_weather_data = response.json()["daily"]["data"][0]
return DataFrame([raw_weather_data])
@solid(
input_defs=[InputDefinition(name="dataframe", dagster_type=RawTripDataFrame)],
output_defs=[OutputDefinition(name="trip_dataframe", dagster_type=TripDataFrame)],
)
def preprocess_trip_dataset(_, dataframe: DataFrame) -> DataFrame:
dataframe = dataframe[["bike_id", "start_time", "end_time"]].dropna(how="all").reindex()
dataframe["bike_id"] = dataframe["bike_id"].astype("int64")
dataframe["start_time"] = to_datetime(dataframe["start_time"])
dataframe["end_time"] = to_datetime(dataframe["end_time"])
dataframe["interval_date"] = dataframe["start_time"].apply(lambda x: x.date())
yield Output(dataframe, output_name="trip_dataframe")
@solid(
input_defs=[InputDefinition("dataframe", DagsterPandasDataFrame)],
output_defs=[OutputDefinition(name="weather_dataframe", dagster_type=WeatherDataFrame)],
)
def preprocess_weather_dataset(_, dataframe: DataFrame) -> DataFrame:
"""
Steps:
1. Converts time columns to match the date types in the traffic dataset so
that we can join effectively.
2. Fills N/A values in ozone, so that we have the right datatypes.
3. Convert precipType to a column that's more amenable to one hot encoding.
"""
dataframe = dataframe[WEATHER_COLUMNS].dropna(how="all").reindex()
# These are to address weird corner cases where columns are NA
# This fill is because the weather API fails to have a summary for certain types of clear days.
dataframe["summary"] = dataframe["summary"].fillna("Clear throughout the day.")
dataframe["icon"] = dataframe["icon"].fillna("clear-day")
# This happens frequently, I defaulted to the average.
dataframe["temperatureLow"] = dataframe["temperatureLow"].fillna(
dataframe.temperatureLow.mean()
)
# This happens frequently as well, I defaulted to the median since this is a time datatype
dataframe["temperatureLowTime"] = (
dataframe["temperatureLowTime"]
.fillna(dataframe.temperatureLowTime.median())
.astype("int64")
)
# This happens frequently, the median felt like the appropriate statistic. Can change this over time.
dataframe["ozone"] = dataframe["ozone"].fillna(dataframe.ozone.median())
dataframe["time"] = to_datetime(
dataframe["time"].apply(lambda epoch_ts: strftime("%Y-%m-%d", gmtime(epoch_ts)))
)
yield Output(dataframe, output_name="weather_dataframe")
@solid(
input_defs=[InputDefinition("trip_dataset", dagster_type=TripDataFrame)],
output_defs=[OutputDefinition(name="traffic_dataframe", dagster_type=TrafficDataFrame)],
)
def transform_into_traffic_dataset(_, trip_dataset: DataFrame) -> DataFrame:
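# For each calendar day, find the largest number of bikes simultaneously in transit during any
# one-hour interval of that day and emit it as peak_traffic_load.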
def max_traffic_load(trips):
interval_count = {
start_interval: 0 for start_interval in date_range(trips.name, periods=24, freq="h")
}
for interval in interval_count.keys():
upper_bound_interval = interval + timedelta(hours=1)
# Count number of bikes in transit during sample interval
interval_count[interval] = len(
trips[
(
( # Select trip if the trip started within the sample interval
(interval <= trips["start_time"])
& (trips["start_time"] < upper_bound_interval)
)
| ( # Select trip if the trip ended within the sample interval
(interval <= trips["end_time"])
& (trips["end_time"] < upper_bound_interval)
)
| ( # Select trip if the trip started AND ended outside of the interval
(trips["start_time"] < interval)
& (trips["end_time"] >= upper_bound_interval)
)
)
]
)
return max(interval_count.values())
counts = trip_dataset.groupby(["interval_date"]).apply(max_traffic_load)
traffic_dataset = DataFrame(counts).reset_index()
traffic_dataset.columns = ["interval_date", "peak_traffic_load"]
yield Output(traffic_dataset, output_name="traffic_dataframe")
class Timeseries:
def __init__(self, sequence):
self.sequence = check.opt_list_param(sequence, "sequence", of_type=(int, float))
def convert_to_snapshot_sequence(self, memory_length):
"""
A snapshot sequence is a transformation of a sequence into a sequence of snapshots which are the past
{memory_length} observations in the sequence. This looks like the following:
f([1, 2, 3, 4, 5], 2) -> [[1,2,3], [2,3,4], [3,4,5]]
"""
if not self.sequence:
raise ValueError("Cannot produce snapshots for an empty sequence")
if memory_length < 1:
raise ValueError("Invalid snapshot length.")
if memory_length >= len(self.sequence):
raise ValueError(
"Unable to produce snapshots. Memory length is too large ({}) and the sequence is too small ({})".format(
memory_length, len(self.sequence)
)
)
snapshot_sequence = []
for index in range(len(self.sequence)):
if index >= memory_length:
snapshot = [
self.sequence[index - snapshot_delta]
for snapshot_delta in range(memory_length + 1)
]
snapshot_sequence.append(snapshot[::-1])
return snapshot_sequence
class MultivariateTimeseries:
def __init__(
self,
input_sequences,
output_sequence,
input_sequence_names,
output_sequence_name,
elem_type=(int, float),
):
self.input_timeseries_collection = [
Timeseries(input_sequence)
for input_sequence in check.matrix_param(
input_sequences, "input_sequences", of_type=elem_type
)
]
self.output_timeseries = Timeseries(check.list_param(output_sequence, "output_sequence"))
if len(input_sequence_names) != len(self.input_timeseries_collection):
raise ValueError("Every timeseries needs a name attached to it.")
self.input_timeseries_names = check.list_param(
input_sequence_names, "input_sequence_names", of_type=str
)
self.output_timeseries_name = check.str_param(output_sequence_name, "output_sequence_name")
def convert_to_snapshot_matrix(self, memory_length):
# Transpose the matrix so that inputs match up with tensorflow tensor expectation
input_snapshot_matrix = transpose(
[
timeseries.convert_to_snapshot_sequence(memory_length)
for timeseries in self.input_timeseries_collection
],
(1, 2, 0),
)
output_snapshot_sequence = self.output_timeseries.sequence[memory_length:]
return array(input_snapshot_matrix), array(output_snapshot_sequence)
@classmethod
def from_dataframe(cls, dataframe, input_sequence_names, output_sequence_name):
return cls(
input_sequences=[
dataframe[input_sequence_name].tolist()
for input_sequence_name in input_sequence_names
],
output_sequence=dataframe[output_sequence_name].tolist(),
input_sequence_names=input_sequence_names,
output_sequence_name=output_sequence_name,
)
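# Minimal usage sketch (hypothetical values):
#   mts = MultivariateTimeseries(
#       input_sequences=[[1, 2, 3, 4]], output_sequence=[10, 20, 30, 40],
#       input_sequence_names=["x"], output_sequence_name="y")
#   X, y = mts.convert_to_snapshot_matrix(memory_length=2)
#   # X.shape == (2, 3, 1) and y == array([30, 40])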
@solid(
config_schema={"memory_length": Field(int, description="The window memory length")},
input_defs=[
InputDefinition("traffic_dataset", dagster_type=TrafficDataFrame),
InputDefinition("weather_dataset", dagster_type=WeatherDataFrame),
],
output_defs=[OutputDefinition(dagster_type=TrainingSet)],
)
def produce_training_set(
context, traffic_dataset: DataFrame, weather_dataset: DataFrame
) -> Tuple[ndarray, ndarray]:
traffic_dataset["time"] = to_datetime(traffic_dataset.interval_date)
weather_dataset["time"] = to_datetime(weather_dataset.time)
dataset = traffic_dataset.join(weather_dataset.set_index("time"), on="time")
dataset = | get_dummies(dataset, columns=["summary", "icon"]) | pandas.get_dummies |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas as pd
import pyrfume
from pyrfume import snitz, odorants
file_path = os.path.join(pyrfume.DATA, 'snitz', 'Snitz144.csv')
snitz_data_raw = pd.read_csv(file_path)
results = odorants.get_cids(snitz_data_raw['CAS'], kind='name', verbose=False)
snitz_data = | pd.Series(results, name='CID') | pandas.Series |
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
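# Helper to load a pickled artifact from disk (e.g. the trained model or, presumably, a fitted scaler).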
def func(filename):
with open(filename, 'rb') as file:
model = pickle.load(file)
return model
app = Flask(__name__)
CORS(app)
@app.route('/api/<string:productName>/<string:marketName>/<int:shelfLife>/<int:quantity>/<float:marketDemand>', methods=['GET'])
def predict(productName,marketName,shelfLife,quantity,marketDemand):
toBePredictedData = {'Shelf Life' : [shelfLife], 'Quantity' : [quantity], 'Market Demand' : [marketDemand]}
toBePredictedData = | pd.DataFrame(toBePredictedData) | pandas.DataFrame |
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
# SignedJwtAssertionCredentials is available in oauthclient < 2.0.0
# ServiceAccountCredentials is available in oauthclient >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
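# Illustrative sketch (not part of the original test module): how a helper
# like make_mixed_dataframe_v2 is typically exercised in a to_gbq/read_gbq
# round trip. The destination table name is a placeholder built from the
# module-level DATASET_ID/TABLE_ID constants; treat the exact flow as an
# assumption, not as one of the real tests below.
def _example_roundtrip_sketch():  # pragma: no cover - documentation aid only
    df = make_mixed_dataframe_v2(5)
    destination_table = "{0}1.{1}1".format(DATASET_ID, TABLE_ID)
    gbq.to_gbq(df, destination_table, _get_project_id(),
               private_key=_get_private_key_path())
    result = gbq.read_gbq("SELECT COUNT(*) as num_rows FROM {0}"
                          .format(destination_table),
                          project_id=_get_project_id(),
                          private_key=_get_private_key_path())
    return result['num_rows'][0] == len(df)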
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.read_gbq('SELECT "1" as NUMBER_1')
def test_that_parse_data_works_properly(self):
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]}
test_page = [{'f': [{'v': 'PI'}]}]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'VALID_STRING': ['PI']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_path()
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_path()))
class TestReadGBQIntegration(tm.TestCase):
@classmethod
def setUpClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_setup_common()
def setUp(self):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
pass
@classmethod
def tearDownClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
pass
def tearDown(self):
# - PER-TEST FIXTURES -
# put here any instructions you want to be run *AFTER* *EVERY* test is
# executed.
pass
def test_should_read_as_user_account(self):
if _in_travis_environment():
raise nose.SkipTest("Cannot run local auth in travis environment")
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_path(self):
_skip_if_no_private_key_path()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_contents(self):
_skip_if_no_private_key_contents()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_contents())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" as EMPTY_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) as NULL_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) as VALID_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]}))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) as NULL_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]}))
def test_should_properly_handle_valid_floats(self):
query = 'SELECT PI() as VALID_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'VALID_FLOAT': [3.141592653589793]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) as NULL_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({
'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN': [True]}))
def test_should_properly_handle_false_boolean(self):
query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN': [False]}))
def test_should_properly_handle_null_boolean(self):
query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN': [None]}))
def test_unicode_string_conversion_and_normalization(self):
correct_test_datatype = DataFrame(
{'UNICODE_STRING': [u("\xe9\xfc")]}
)
unicode_string = "\xc3\xa9\xc3\xbc"
if compat.PY3:
unicode_string = unicode_string.encode('latin-1').decode('utf8')
query = 'SELECT "{0}" as UNICODE_STRING'.format(unicode_string)
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, correct_test_datatype)
def test_index_column(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2"
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col="STRING_1",
private_key=_get_private_key_path())
correct_frame = DataFrame(
{'STRING_1': ['a'], 'STRING_2': ['b']}).set_index("STRING_1")
tm.assert_equal(result_frame.index.name, correct_frame.index.name)
def test_column_order(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3"
col_order = ['STRING_3', 'STRING_1', 'STRING_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=_get_private_key_path())
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': [
'b'], 'STRING_3': ['c']})[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_column_order_plus_index(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3"
col_order = ['STRING_3', 'STRING_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col='STRING_1', col_order=col_order,
private_key=_get_private_key_path())
correct_frame = DataFrame(
{'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']})
correct_frame.set_index('STRING_1', inplace=True)
correct_frame = correct_frame[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_malformed_query(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]",
project_id=_get_project_id(),
private_key=_get_private_key_path())
def test_bad_project_id(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELECT 1", project_id='001',
private_key=_get_private_key_path())
def test_bad_table_name(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]",
project_id=_get_project_id(),
private_key=_get_private_key_path())
def test_download_dataset_larger_than_200k_rows(self):
test_size = 200005
# Test for known BigQuery bug in datasets larger than 100k rows
# http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] "
"GROUP EACH BY id ORDER BY id ASC LIMIT {0}"
.format(test_size),
project_id=_get_project_id(),
private_key=_get_private_key_path())
self.assertEqual(len(df.drop_duplicates()), test_size)
def test_zero_rows(self):
# Bug fix for https://github.com/pandas-dev/pandas/issues/10273
df = gbq.read_gbq("SELECT title, id "
"FROM [publicdata:samples.wikipedia] "
"WHERE timestamp=-9999999",
project_id=_get_project_id(),
private_key=_get_private_key_path())
page_array = np.zeros(
(0,), dtype=[('title', object), ('id', np.dtype(float))])
expected_result = DataFrame(page_array, columns=['title', 'id'])
self.assert_frame_equal(df, expected_result)
def test_legacy_sql(self):
legacy_sql = "SELECT id FROM [publicdata.samples.wikipedia] LIMIT 10"
# Test that a legacy sql statement fails when
# setting dialect='standard'
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq(legacy_sql, project_id=_get_project_id(),
dialect='standard',
private_key=_get_private_key_path())
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
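# Illustrative sketch (not part of pandas): the generated wrapper is what gives
# the datetime-like arrays their scalar/NaT-aware comparison semantics. The
# values below are purely for exposition.
def _example_cmp_usage():  # pragma: no cover - documentation aid only
    import pandas as pd
    arr = pd.array(pd.date_range("2020-01-01", periods=3))
    gt = arr > "2020-01-02"   # the string is parsed via _scalar_from_string
    ne_nat = arr != pd.NaT    # NaT comparisons are filled with nat_result (True)
    return gt, ne_nat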
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
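# Illustrative sketch (not part of pandas): tz-aware rounding in _round above
# strips the timezone, rounds the naive wall times, then re-localizes.
def _example_tz_round():  # pragma: no cover - documentation aid only
    import pandas as pd
    rng = pd.date_range("2018-03-10 09:10", periods=3, freq="min",
                        tz="US/Eastern")
    return rng.round("H")  # same result as rounding the naive wall times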
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
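# Example (illustrative, not in the original source): astype("int64") returns
# the raw i8 values (copied when ``copy=True``), astype(str) goes through
# _format_native_types(), and astype("float64") or datetime<->timedelta
# conversions raise TypeError per the branch above.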
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
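# Example (illustrative, not in the original source): for a DatetimeArray
# holding ['2020-01-01', '2020-01-02', '2020-01-03'],
# searchsorted('2020-01-02') returns 1 and
# searchsorted('2020-01-02', side='right') returns 2; the lookup is performed
# on the underlying int64 (i8) values.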
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replaced by the fill_value
mask the result if needed, convert to the provided dtype if it's not
None
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if it's set, otherwise None.
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
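# Example (illustrative, not in the original source): constructing
# DatetimeIndex(['2020-01-01', '2020-01-03'], freq='D') ends up here; the
# values conform to '2D', not 'D', so the np.array_equal check fails and the
# "does not conform to passed frequency" ValueError above is raised.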
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
def _add_datetimelike_scalar(self, other):
# Overridden by TimedeltaArray
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overridden by PeriodArray
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e. np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(self.shape, dtype="i8")
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(
"i8"
)
new_values = self._maybe_mask_results(new_values)
return new_values.view("i8")
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas.core.arrays import TimedeltaArray
other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError(
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
)
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return result.view("timedelta64[ns]")
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if not is_period_dtype(self):
raise TypeError(
f"cannot subtract {other.dtype}-dtype from {type(self).__name__}"
)
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_object_array(self, other: np.ndarray, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : np.ndarray[object]
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn(
"Adding/subtracting array of DateOffsets to "
f"{type(self).__name__} not vectorized",
PerformanceWarning,
)
# For EA self.astype('O') returns a numpy array, not an Index
left = self.astype("O")
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs["freq"] = "infer"
try:
res = type(self)._from_sequence(res_values, **kwargs)
except ValueError:
# e.g. we've passed a Timestamp to TimedeltaArray
res = res_values
return res
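# Example (illustrative, not in the original source): adding
# np.array([MonthEnd(), MonthEnd(2)], dtype=object) to a length-2
# DatetimeArray takes this object path, applies each offset elementwise via
# astype("O"), and emits the PerformanceWarning because the operation cannot
# be vectorized.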
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None, freq=self.freq)
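# Example (illustrative, not in the original source): for a daily
# DatetimeArray ['2020-01-01', '2020-01-02'] with freq='D', _time_shift(2)
# yields ['2020-01-03', '2020-01-04']; passing a ``freq`` different from
# ``self.freq`` instead offsets every element by ``periods * freq``.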
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
# scalar others
if other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
return self._add_datetime_arraylike(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.add)
else:
# Includes Categorical, other ExtensionArrays
# For PeriodDtype, if self is a TimedeltaArray and other is a
# PeriodArray with a timedelta-like (i.e. Tick) freq, this
# operation is valid. Defer to the PeriodArray implementation.
# In remaining cases, this will end up raising TypeError.
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
# scalar others
if other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(-other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
result = self._sub_datetime_arraylike(other)
elif is_period_dtype(other):
# PeriodIndex
result = self._sub_period_array(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.sub)
else:
# Includes ExtensionArrays, float_dtype
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __rsub__(self, other):
if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self.dtype):
# ndarray[datetime64] cannot be subtracted from self, so
# we need to wrap in DatetimeArray/Index and flip the operation
if lib.is_scalar(other):
# i.e. np.datetime64 object
return Timestamp(other) - self
if not isinstance(other, DatetimeLikeArrayMixin):
# Avoid down-casting DatetimeIndex
from pandas.core.arrays import DatetimeArray
other = DatetimeArray(other)
return other - self
elif (
is_datetime64_any_dtype(self.dtype)
and hasattr(other, "dtype")
and not is_datetime64_any_dtype(other.dtype)
):
# GH#19959 datetime - datetime is well-defined as timedelta,
# but any other type - datetime is not well-defined.
raise TypeError(
f"cannot subtract {type(self).__name__} from {type(other).__name__}"
)
elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
elif is_timedelta64_dtype(self.dtype):
if lib.is_integer(other) or is_integer_dtype(other):
# need to subtract before negating, since that flips freq
# -self flips self.freq, messing up results
return -(self - other)
return (-self) + other
return -(self - other)
def __iadd__(self, other): # type: ignore
result = self + other
self[:] = result[:]
if not is_period_dtype(self):
# restore freq, which is invalidated by setitem
self._freq = result._freq
return self
def __isub__(self, other): # type: ignore
result = self - other
self[:] = result[:]
if not is_period_dtype(self):
# restore freq, which is invalidated by setitem
self._freq = result._freq
return self
# --------------------------------------------------------------
# Reductions
def _reduce(self, name, axis=0, skipna=True, **kwargs):
op = getattr(self, name, None)
if op:
return op(skipna=skipna, **kwargs)
else:
return super()._reduce(name, skipna, **kwargs)
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Array or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
if isna(result):
# Period._from_ordinal does not handle np.nan gracefully
return NaT
return self._box_func(result)
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
mask = self.isna()
if skipna:
values = self[~mask].asi8
elif mask.any():
return NaT
else:
values = self.asi8
if not len(values):
# short-circuit for empty max / min
return NaT
result = nanops.nanmax(values, skipna=skipna)
if isna(result):
# Period._from_ordinal does not handle np.nan gracefully
return NaT
return self._box_func(result)
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from fattails.metrics import mad, get_survival_probability
class TestMad:
"""Test the mean absolute deviation method"""
def test_example(self):
x = [0,5,-5,0,0]
mad_ = mad(x)
expected_mad = 2
assert mad_ == expected_mad
def test_handles_mad_of_zero(self):
x = [1,1,1,1,1]
x = np.array(x)
mad_ = mad(x)
assert mad_ == 0
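# Reference sketch (an assumption, for illustration only; the real
# implementation lives in fattails.metrics): the expectations above are
# consistent with ``mad`` computing the mean absolute deviation around the
# arithmetic mean.
def _reference_mad(x):
    x = np.asarray(x, dtype=float)
    return float(np.mean(np.abs(x - x.mean())))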
@pytest.mark.parametrize("description, input_data, expected_output", [
("duplicate_values", [ 2, 2, 3], [0.75, 0.5, 0.25]),
("negative_values", [-1,-0.3, 7], [0.75, 0.5, 0.25]),
("not_sorted_values", [ 2, 3, 2], [0.75, 0.25, 0.5]),
])
class TestGetSurvivalProbability:
def test_accepts_list_input(self, description, input_data, expected_output):
"""List input data should be accepted even though output is always a pandas series."""
output = get_survival_probability(input_data)
assert output.name == 'survival_probability'
assert output.to_list() == expected_output
def test_accepts_series_input(self, description, input_data, expected_output):
# Setup
index = pd.date_range('2000-01-01', periods=len(input_data))
# Input series
input_name = 'name_placeholder'
input_data = | pd.Series(input_data, index, name=input_name) | pandas.Series |
import pandas as pd
from Project import Project
from logger import logging
from tqdm import tqdm
project = Project()
root = project.data_dir / 'yolo/frames_copy/'
print(project)
def read_bb(path):
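# Reads one label file from the yolo/frames_copy tree: one bounding box per line,
# class index followed by four box coordinates (named as in the columns below).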
return | pd.read_csv(path, delimiter=' ', names=['class', 'x', 'y', 'x2', 'y2'], header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing as pdt
from tests.fixtures import DataTestCase
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
from tsfresh.utilities.dataframe_functions import impute
class RelevantFeatureExtractionDataTestCase(DataTestCase):
"""
Test case for the relevant_feature_extraction function
"""
def test_functional_equality(self):
"""
`extract_relevant_features` should be equivalent to running first `extract_features` with impute and
`select_features` afterwards.
Meaning it should produce the same relevant features and the values of these features should be identical.
:return:
"""
df, y = self.create_test_data_sample_with_target()
relevant_features = extract_relevant_features(
df,
y,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
)
extracted_features = extract_features(
df,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
impute_function=impute,
)
selected_features = select_features(extracted_features, y)
self.assertEqual(
set(relevant_features.columns),
set(selected_features.columns),
"Should select the same columns:\n\t{}\n\nvs.\n\n\t{}".format(
relevant_features.columns, selected_features.columns
),
)
relevant_columns = relevant_features.columns
relevant_index = relevant_features.index
self.assertTrue(
relevant_features.equals(
selected_features.loc[relevant_index][relevant_columns]
),
"Should calculate the same feature values",
)
class RelevantFeatureExtractionTestCase(TestCase):
def setUp(self):
np.random.seed(42)
y = pd.Series(np.random.binomial(1, 0.5, 20), index=range(20))
df = pd.DataFrame(index=range(100))
df["a"] = np.random.normal(0, 1, 100)
df["b"] = np.random.normal(0, 1, 100)
df["id"] = np.repeat(range(20), 5)
X = pd.DataFrame(index=range(20))
X["f1"] = np.random.normal(0, 1, 20)
X["f2"] = np.random.normal(0, 1, 20)
self.df = df
self.X = X
self.y = y
def test_extracted_features_contain_X_features(self):
X = extract_relevant_features(self.df, self.y, self.X, column_id="id")
self.assertIn("f1", X.columns)
self.assertIn("f2", X.columns)
pdt.assert_series_equal(self.X["f1"], X["f1"])
pdt.assert_series_equal(self.X["f2"], X["f2"])
pdt.assert_index_equal(self.X["f1"].index, X["f1"].index)
pdt.assert_index_equal(self.X["f2"].index, X["f2"].index)
def test_extraction_null_as_column_name(self):
df1 = pd.DataFrame(
data={
0: range(10),
1: np.repeat([0, 1], 5),
2: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X1 = extract_features(df1, column_id=1, column_sort=2)
self.assertEqual(len(X1), 2)
df2 = pd.DataFrame(
data={
1: range(10),
0: np.repeat([0, 1], 5),
2: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X2 = extract_features(df2, column_id=0, column_sort=2)
self.assertEqual(len(X2), 2)
df3 = pd.DataFrame(
data={
0: range(10),
2: np.repeat([0, 1], 5),
1: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X3 = extract_features(df3, column_id=2, column_sort=1)
self.assertEqual(len(X3), 2)
def test_raises_mismatch_index_df_and_y_df_more(self):
y = pd.Series(range(3), index=[1, 2, 3])
df_dict = {
"a": | pd.DataFrame({"val": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = | pd.Series(mixed) | pandas.Series |
"""
This code implements a probabilistic matrix factorization (PMF) per weeks 10 and 11 assignment of the machine learning module part of Columbia University Micromaster programme in AI.
Written using Python 3.7.
"""
from __future__ import division
# builtin modules
import sys
import os
import math
from random import randrange
import functools
import operator
import requests
import psutil
# 3rd party modules
import numpy as np
import pandas as pd
import scipy as sp
from scipy.cluster.vq import kmeans2
from scipy.stats import multivariate_normal
from scipy.spatial.distance import cdist
from scipy.special import logsumexp
from scipy import stats
def get_data(filename, **kwargs):
"""
Read data from a file given its name. Option to provide the path to the file if different from the default ([./datasets/out]).
------------
Parameters:
- filename: name of the file to be read, to get the data from.
- kwargs (optional):
- 'headers': list of str for the headers to include in the outputs file created
- 'path': str of the path to where the file is read, specified if different from the default ([./datasets/out])
------------
Returns:
- df: a dataframe of the data
"""
# Define input filepath
if 'path' in kwargs:
filepath = kwargs['path']
else:
filepath = os.path.join(os.getcwd(),'datasets','out')
input_path = os.path.join(filepath, filename)
# If provided, use the title of the columns in the input dataset as headers of the dataframe
if 'headers' in kwargs:
# Read input data
df = pd.read_csv(input_path, names = kwargs['headers'])
else:
# Read input data
df = pd.read_csv(input_path)
return df
def PMF(train_data, headers = ['user_id', 'movie_id'], lam:int = 2, sigma2:float = 0.1, d:int = 5, iterations:int = 50, output_iterations:list=[10,25,50]):
"""
Implements Probabilistic Matrix Factorization.
------------
Parameters:
- train_data: dataset used for training (e.g. the ratings.csv dataset with missing values for users and movies).
- headers: title of the headers in the dataset for the 'users id' and 'movie id' values.
- lam: lambda value to initialise the Gaussian zero mean distribution (default lam = 2 for this assignment).
- sigma2: covariance of the Gaussian (default sigma2 = 0.1 for this assignment).
- d: number of dimensions for the ranking, (default d = 5 for this assignment).
- iterations: number of iterations to run PMF for (default, 50 iterations).
------------
Returns:
- L_results: values of the objective function L per iteration
- users, objects: unique user ids and object (movie) ids
- parameters: dict holding the learned U (users) and V (objects) matrices
- M, rows, cols: the ratings matrix and its row/column index mappings
"""
L_results = []
# first convert dataframe to the ratings matrix as a sparse matrix
M, n, m, users, objects, rows, cols = df_to_ratings_matrix(train_data, headers = headers)
parameters = initialize_parameters(lam, n, m, d)
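# Coordinate ascent for the MAP objective: alternately update U and V in closed form,
# then record the objective L after every iteration.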
for i in range(1, iterations + 1):
parameters = update_parameters(M, parameters, lam, n, m, d)
L = objective_function(M, sigma2, lam, parameters)
L_results.append(L)
if i in output_iterations:
print('Objective function L at iteration ', i, ':', L)
filename = "U-" + str(i) + ".csv"
np.savetxt(filename, parameters['U'].T, delimiter=",")
filename = "V-" + str(i) + ".csv"
np.savetxt(filename, parameters['V'].T, delimiter=",")
np.savetxt("objective.csv", L_results, delimiter=",")
return L_results, users, objects, parameters, M, rows, cols
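# Illustrative usage (not part of the original script; the file name and headers below are assumptions):
#   ratings = get_data('ratings.csv', headers=['user_id', 'movie_id', 'rating'])
#   L_results, users, movies, params, M, rows, cols = PMF(ratings, lam=2, sigma2=0.1, d=5, iterations=50)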
def initialize_parameters(lam, n, m, d):
"""
Initializes the parameters: U as a zero matrix and V drawn from a zero-mean Gaussian with scale 1/lambda.
------------
Parameters:
- lam: lambda value; V is initialised from a zero-mean Gaussian with scale 1/lam.
- n: number of users in dataset
- m: number of movies in dataset
- d: number of dimensions for the ranking, (default d = 5 for this assignment).
------------
Returns:
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
"""
U = np.zeros((d, n), dtype=np.float64)
V = np.random.normal(0.0, 1.0 / lam, (d, m))
parameters = {}
parameters['U'] = U
parameters['V'] = V
parameters['lambda_U'] = lam
parameters['lambda_V'] = lam
return parameters
def df_to_ratings_matrix(df, **kwargs):
"""
Converts a given dataframe to a sparse matrix, in this case the M ratings matrix.
------------
Parameters:
- df: dataframe used for training (e.g. the ratings.csv dataset with missing values for users and movies).
- headers (optional): title of the headers in the dataset for the 'users id' and 'movie id' values.
------------
Returns:
- M: the ratings matrix, as sparse (zeros used to fill the nan, missing values)
- n: number of rows
- m: number of columns
- users: list of unique users
- movies: list of unique movies
- rows: rows of the matrix M
- cols: columns of the matrix M
"""
df = df.dropna(how='all')
if 'headers' in kwargs:
headers = kwargs['headers']
users_header = headers[0]
movies_header = headers[1]
else:
users_header = 'user_id'
movies_header = 'movie_id'
users = df[users_header].unique()
movies = df[movies_header].unique()
df_values = df.values
# initialise M ratings matrix as a sparse matrix of zeros
M = np.zeros((len(users), len(movies)))
rows = {}
cols = {}
for i, user_id in enumerate(users):
rows[user_id] = i
for j, movie_id in enumerate(movies):
cols[movie_id] = j
for index, row in df.iterrows():
i = rows[row.user_id]
j = cols[row.movie_id]
M[i, j] = row.rating
n = len(users) #number of rows
m = len(movies) #number of columns
return M, n, m, users, movies, rows, cols
def update_parameters(M, parameters, lam, n, m, d):
"""
Implements the function that updates U and V.
------------
Parameters:
- M: the ratings matrix, as sparse (zeros used to fill the nan, missing values)
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
- lam: lambda value to initialise the Gaussian zero mean distribution (default lam = 2 for this assignment).
- n: number of users in dataset
- m: number of movies in dataset
- d: number of dimensions for the ranking, (default d = 5 for this assignment).
------------
Returns:
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
"""
U = parameters['U']
V = parameters['V']
lambda_U = parameters['lambda_U']
lambda_V = parameters['lambda_V']
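# Closed-form ridge-regression updates: each user vector u_i is fit on the movies that user rated (V_j),
# and each movie vector v_j is fit on the users who rated that movie (U_i).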
for i in range(n):
V_j = V[:, M[i, :] > 0]
U[:, i] = np.dot(np.linalg.inv(np.dot(V_j, V_j.T) + lambda_U * np.identity(d)), np.dot(M[i, M[i, :] > 0], V_j.T))
for j in range(m):
U_i = U[:, M[:, j] > 0]
V[:, j] = np.dot(np.linalg.inv(np.dot(U_i, U_i.T) + lambda_V * np.identity(d)), np.dot(M[M[:, j] > 0, j], U_i.T))
parameters['U'] = U
parameters['V'] = V
min_rating = np.min(M)
max_rating = np.max(M)
return parameters
def objective_function(M, sigma2, lam, parameters):
"""
Calculates the result of the objective function 'L' with equation as follows:
L = - sum_{(i,j) in Omega} 1/(2*sigma^2) * (M_ij - u_i^T v_j)^2
    - sum_{i=1..N_u} (lambda/2) * ||u_i||^2
    - sum_{j=1..N_v} (lambda/2) * ||v_j||^2
------------
Parameters:
- M: the ratings matrix, as sparse (zeros used to fill the nan, missing values)
- sigma2: covariance of the Gaussian noise (default 0.1)
- lam: lambda regularization parameter for U and V (default 2)
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
------------
Returns:
- L: the resulting float number from calculating the objective function based on the above equation of 'L'
"""
lambda_U = parameters['lambda_U']
lambda_V = parameters['lambda_V']
U = parameters['U']
V = parameters['V']
# We divide L equation into its three main summands
UV = np.dot(U.T, V) # u_i^T v_j
M_UV = (M[M > 0] - UV[M > 0]) # (M_ij - u_i^T v_j) over the observed entries (M > 0)
L1 = - (1 / (2 * sigma2)) * (np.sum((M_UV)**2))
L2 = - (lambda_U / 2 ) * (np.sum(np.linalg.norm(U)**2))
L3 = - (lambda_V / 2 ) * (np.sum(np.linalg.norm(V)**2))
L = L1 + L2 + L3
#L = -0.5 * (sigma2)* (np.sum(np.dot(M_UV, M_UV.T)) + lambda_U * np.sum(np.dot(U, U.T)) + lambda_V * np.sum(np.dot(V, V.T)))
return L
def save_outputs_txt(data, output_iterations:list = [5, 10, 25]):
"""
Write the outputs to csv files.
------------
Parameters:
- data: a list of the resulting matrixes to write as outputs.
- output_iterations: the iterations to store as output csv files for the U and V matrixes.
------------
Returns:
- csv files with the output data
"""
L_results = data[0]
np.savetxt("objective.csv", L_results, delimiter=",")
U_results = data[1]
V_results = data[2]
for i in output_iterations:
filename = "U-" + str(i) + ".csv"
np.savetxt(filename, U_results[i].T, delimiter=",")
filename = "V-" + str(i) + ".csv"
np.savetxt(filename, V_results[i].T, delimiter=",")
return
def predict(M, rows, cols, parameters, user_id, movie_id):
"""
Predicts the rating value. Note the value has been scaled within the range 0-5.
------------
Parameters:
- M: the ratings matrix, as sparse (zeros used to fill the nan, missing values)
- rows: rows of the matrix M
- cols: columns of the matrix M
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
- user_id: id of the users being examined
- movie_id: id of the objects being rated
------------
Returns:
- rating: a float number of the predicted rating for the object and user pair
"""
U = parameters['U']
V = parameters['V']
M_ij = U[:, rows[user_id]].T.reshape(1, -1) @ V[:, cols[movie_id]].reshape(-1, 1)
min_rating = np.min(M)
max_rating = np.max(M)
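# Min-max scale the raw inner product M_ij onto the 0-5 rating range.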
return 0 if max_rating == min_rating else ((M_ij[0][0] - min_rating) / (max_rating - min_rating)) * 5.0
def get_prediction(user_id, movies, M, rows, cols, parameters):
"""
Obtain a dataframe of users Ids, movies Ids and the predicted rating for a given user Id.
------------
Parameters:
- user_id: the id of the user being examined
- movies: the list of unique movie Ids
- M: the ratings matrix, as sparse (zeros used to fill the nan, missing values)
- rows: rows of the matrix M
- cols: columns of the matrix M
- parameters: a dictionary with the values for:
- U: matrix of users
- V: matrix of objects (movies in this case)
- lambda_U: value of lambda, per the inputs
- lambda_V: value of lambda, per the inputs
------------
Returns:
- df_result: a dataframe of users Ids, movies Ids and the predicted rating for a given user Id
"""
predictions = np.zeros((len(movies), 1))
df_result = | pd.DataFrame(columns=['UserID', 'MovieID', 'Prediction']) | pandas.DataFrame |
from pathlib import Path
import os
import subprocess
import pandas as pd
import numpy as np
from collections import Counter
from tqdm import tqdm
import matplotlib.pyplot as mpl
import matplotlib.dates as md
import matplotlib
import pylab as pl
from IPython.core.display import display, HTML
pwd = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/csv_data/"
def main():
# Retrieve the empath database
with open(os.path.join(pwd, "empath_score.csv"), 'r', encoding='utf-8') as infile:
empaths = pd.read_csv(infile, header=0, index_col = 0, usecols=[0,1,2,3,4,5,6])
# Retrieve the clean author list, narrow down our empath database to journals with length > 50 who also show up in the clean list
with open(os.path.join(pwd, "cleaned_auths.csv"), 'r', encoding='utf-8') as infile:
authors = | pd.read_csv(infile, header=0, usecols=[0,1,2,3]) | pandas.read_csv |
"""
This script allows a detailed analysis of the pathways found by the learner
in the AYS model.
@author: <NAME>
"""
import sys,os
import numpy as np
import pandas as pd
from plots.AYS_3D_figures import *
import scipy.integrate as integ
shelter_color='#ffffb3'
# Column names of the saved trajectory files; defined at module level because helpers such as
# read_one_trajectory and plot_knickPoints_2D reference `parameters` outside read_trajectories.
parameters=['A', 'Y', 'S', 'Action', 'Reward']
def read_trajectories(learner_type, reward_type, basin, policy='epsilon_greedy', episode=0):
runs=[]
#10_path_[0.5, 0.5, 0.5]_episode9500
limit=150
parameters=['A' , 'Y' , 'S' , 'Action' , 'Reward' ]
for i in range(limit):
file_name=('./'+learner_type+'/' + policy +'/' +reward_type + '/DQN_Path/' +
basin+ '/' + str(i)+'_path_[0.5, 0.5, 0.5]_episode' + str(episode)+'.txt')
if os.path.isfile(file_name):
tmp_file= pd.read_csv(file_name, sep='\s+' ,header=None, names=parameters, skiprows=1, index_col=False)
runs.append(tmp_file)
# print(file_name)
# For not too many files
if len(runs) > 100:
break
print(learner_type +' '+ reward_type + ' ' + basin + ':' ,len(runs))
return runs
def read_one_trajectory(learner_type, reward_type, basin, policy='epsilon_greedy', episode=0, run_idx=0):
file_name=('./'+learner_type+'/' + policy +'/' +reward_type + '/DQN_Path/' +
basin+ '/' + str(run_idx)+'_path_[0.5, 0.5, 0.5]_episode' + str(episode)+'.txt')
if os.path.isfile(file_name):
tmp_file= pd.read_csv(file_name, sep='\s+' ,header=None, names=parameters, skiprows=1, index_col=False)
return tmp_file
else:
#print("No trajectories available for this simulation! run_idx: ", run_idx, " episode: ", episode)
return None
def plot_current_state_trajectories(ax3d, label=False):
# Trajectories for the current state with all possible management options
time = np.linspace(0, 300, 1000)
for action_number in range(len(management_actions)):
if label==True:
this_label=management_options[action_number]
else:
this_label=None
parameter_list=get_parameters(action_number)
my_color=color_list[action_number]
traj_one_step=odeint(ays.AYS_rescaled_rhs, current_state,time , args=parameter_list[0])
ax3d.plot3D(xs=traj_one_step[:,0], ys=traj_one_step[:,1], zs=traj_one_step[:,2],
color=my_color, alpha=.8, lw=2, label=this_label)
def plot_run(ax3d, learning_progress, reward_type, alpha=1., color_set=True, own_color=None):
#print(learning_progress)
timeStart = 0
intSteps = 10 # integration Steps
dt=1
sim_time_step=np.linspace(timeStart,dt, intSteps)
ax3d.plot3D(xs=learning_progress['A'], ys=learning_progress['Y'], zs=learning_progress['S'],
alpha=alpha, lw=1)
def plot_2D_AYS(self, learning_progress, file_path):
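# NOTE: this helper was lifted from a class method; `self` is expected to be the learning-environment
# object providing attributes such as A_PB, Y_SF, management_options and run_number.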
#print(learning_progress)
start_state=learning_progress[0][0]
states=np.array(learning_progress)[:,0]
a_states=list(zip(*states))[0]
y_states=list(zip(*states))[1]
s_states=list(zip(*states))[2]
actions=np.array(learning_progress)[:,1]
rewards=np.array(learning_progress)[:,2]
fig=plt.figure(figsize=(14,8))
ax=fig.add_subplot(111)
plt.plot(actions +1, ".")
plt.plot(a_states, "b", label='A')
plt.plot([a*0 + self.A_PB for a in a_states], "b:")
plt.plot(y_states, "k", label='Y')
plt.plot([y*0 + self.Y_SF for y in y_states], "k:")
plt.plot(s_states, "y", label='S')
plt.ylim(-0.1,)
at = AnchoredText((" 1.0: " + self.management_options[0] +"\n" +
" 2.0: " + self.management_options[1] +"\n" +
" 3.0: " + self.management_options[2] +"\n" +
" 4.0: " + self.management_options[3] ),
prop=dict(size=14), frameon=True,
loc='center right'
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
fontP = FontProperties()
fontP.set_size(12)
plt.xlabel('# Timesteps')
plt.ylabel('rescaled dynamic variables')
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2), ncol=3, prop=fontP)
plt.tight_layout()
final_state=self._which_final_state().name
save_path = (file_path +'/DQN_Path/'+ final_state +'/'+
str (self.run_number) + '_' + '2D_time_series'+ str(start_state) + '.pdf' )
#plt.savefig(self.image_dir + '/sucess_path/2D_time_series'+str(start_state)+'.pdf')
plt.savefig(save_path)
plt.close()
print('Saved as figure in path:' + save_path)
def get_averaged_AYS(learning_progress, k):
A=learning_progress['A'].rolling(k ).mean()
Y=learning_progress['Y'].rolling(k ).mean()
S=learning_progress['S'].rolling(k ).mean()
return A, Y, S
def correct_averaged_AYS(A,Y,S):
A[np.isnan(A)]=0.5
Y[np.isnan(Y)]=0.5
S[np.isnan(S)]=0.5
return A, Y, S
def find_green_knick_point(A, Y, S):
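# The "knick" point is where the trajectory bends towards the green fixed point,
# detected as the steepest negative gradient of Y with respect to S.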
gradient_arrayA=np.gradient(A, S)
max_change1=np.argmax((gradient_arrayA))
gradient_arrayY=np.gradient( Y, S,edge_order=2)
max_change=np.where( gradient_arrayY==np.nanmin(gradient_arrayY))[0][0] # gets the index of the element, which is not NaN
#print("[", A[max_change],",", Y[max_change],",", S[max_change],"],")
return max_change
def find_brown_knick_point(A,Y,S):
gradient_arrayY=(np.gradient(Y)) # In Y we have a significant change in the slope that goes to 0
max_change=np.where( gradient_arrayY==np.nanmin(gradient_arrayY))[0][0] +1 # gets the index of the element, which is not NaN, +1 to get the point where the knick has happened
#print("[", A[max_change],",", Y[max_change],",", S[max_change],"],")
return max_change
def find_shelter_point(S):
return next(idx for idx, value in enumerate(S) if value > 0.75)
def find_backwater_point(S):
return next(idx for idx, value in enumerate(S) if value < 0.1)
def cut_shelter_traj(A, Y):
idx_Y= next(idx for idx, value in enumerate(Y) if value > 0.63)
idx_A= next(idx for idx, value in enumerate(A) if value < 0.4)
if idx_Y<idx_A:
return idx_Y
else:
return idx_A
def get_percentage_of_max_action(action, idx_1, idx_2):
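# Returns the most frequent management action in action[idx_1:idx_2] together with the
# fraction of time steps on which it was applied.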
x = action[idx_1:idx_2]
unique, counts = np.unique(x, return_counts=True)
distribution=np.asarray((unique, counts)).T
#print(unique, counts, distribution)
tot_len_array=idx_2 - idx_1
max_count=np.max(counts)
idx_max_action=np.argwhere(counts==max_count)
#print(idx_max_action, unique)
max_action = int(unique[idx_max_action][0])
percentage=max_count/tot_len_array
return max_action, percentage
def plot_part_trajectory(ax3d, A, Y, S, action, start_idx, end_idx):
max_action, percentage=get_percentage_of_max_action(action, start_idx, end_idx )
#print(np.mean(action[start_idx:end_idx]), end_idx, max_action, percentage)
ax3d.plot3D(xs=A[start_idx:end_idx], ys=Y[start_idx:end_idx], zs=S[start_idx:end_idx], color=color_list[max_action], alpha=percentage,lw=1)
def plot_action_trajectory (ax3d, learning_progress, start_idx, end_idx, lw=1):
timeStart = 0
intSteps = 2 # integration Steps
dt=1
sim_time_step=np.linspace(timeStart,dt, intSteps)
cut_array=learning_progress[start_idx:end_idx].reset_index(drop = True)
#print(cut_array)
for index, row in cut_array.iterrows():
#print(row)
state=(row['A'], row['Y'], row['S'])
action=int(row['Action'])
parameter_list=get_parameters(action)
my_color=color_list[action]
traj_one_step=odeint(ays.AYS_rescaled_rhs, state, sim_time_step , args=parameter_list[0])
# Plot trajectory
ax3d.plot3D(xs=traj_one_step[:,0], ys=traj_one_step[:,1], zs=traj_one_step[:,2],
color=my_color, alpha=1., lw=lw)
def plot_point_cloud_positive_developement(ax3d, learning_progress, cut_shelter=False,with_traj=True):
timeStart = 0
intSteps = 10 # integration Steps
dt=1
sim_time_step=np.linspace(timeStart,dt, intSteps)
k=20
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
knick_point=find_green_knick_point(A, Y, S)
A,Y,S=correct_averaged_AYS( A,Y,S)
if learning_progress['S'][knick_point]<0.65 and knick_point >5:
#knick_point=np.min( np.argpartition(gradient_array, k)[0:k])
ax3d.scatter(xs=A[knick_point+k], ys=Y[knick_point+k], zs=S[knick_point+k],
alpha=1,lw=1, color='tab:green' )
if with_traj:
# Part before knick
# plot_part_trajectory(ax3d, learning_progress['A'], learning_progress['Y'], learning_progress['S'], action, 0, knick_point)
plot_part_trajectory(ax3d, A, Y, S, action, 0, knick_point+k)
# Part between knick and shelter
idx_shelter=find_shelter_point(learning_progress['S'])
# plot_part_trajectory(ax3d, learning_progress['A'], learning_progress['Y'], learning_progress['S'], action, knick_point, idx_shelter)
plot_part_trajectory(ax3d, A, Y, S, action, knick_point+k, idx_shelter+1)
# Part of the shelter, where every trajectory leads to green FP
if cut_shelter:
idx_cut_shelter=cut_shelter_traj(learning_progress['A'],learning_progress['Y'])
# plot_part_trajectory(ax3d, learning_progress['A'], learning_progress['Y'], learning_progress['S'], action, idx_shelter, idx_cut_shelter)
plot_part_trajectory(ax3d, A,Y,S, action, idx_shelter, idx_cut_shelter)
# ax3d.plot3D(learning_progress['A'][idx_shelter:idx_cut_shelter], learning_progress['Y'][idx_shelter:idx_cut_shelter],
# learning_progress['S'][idx_shelter:idx_cut_shelter], color=shelter_color, alpha=.9)
else:
ax3d.plot3D(A[idx_shelter:], Y[idx_shelter:],
S[idx_shelter:], color=shelter_color, alpha=.9)
#plot_part_trajectory(ax3d, learning_progress['A'], learning_progress['Y'], learning_progress['S'], action, idx_shelter-1, len(learning_progress['A']))
#plot_action_trajectory(ax3d, learning_progress, idx_shelter, len(learning_progress['A']))
else:
print('here!', knick_point)
def plot_point_cloud_positive_developement_averaged(ax3d, learning_progress, cut_shelter=False, with_traj=True):
k=20
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
knick_point=find_green_knick_point(A, Y, S)
if learning_progress['S'][knick_point]<0.65 and knick_point >5:
#knick_point=np.min( np.argpartition(gradient_array, k)[0:k])
ax3d.scatter(xs=A[knick_point+k], ys=Y[knick_point+k], zs=S[knick_point+k],
alpha=1,lw=1, color='lawngreen' )
# Part before knick
if with_traj:
plot_part_trajectory(ax3d, A,Y,S, action, 0, knick_point+k)
# Part between knick and shelter
idx_shelter=find_shelter_point(S)
plot_part_trajectory(ax3d, A,Y,S, action, knick_point+k, idx_shelter)
# Part of the shelter, where every trajectory leads to green FP
if cut_shelter:
idx_cut_shelter=cut_shelter_traj(A,Y)
ax3d.plot3D(A[idx_shelter:idx_cut_shelter], Y[idx_shelter:idx_cut_shelter],
S[idx_shelter:idx_cut_shelter], color=shelter_color, alpha=.5)
else:
ax3d.plot3D(A[idx_shelter:], Y[idx_shelter:],
S[idx_shelter:], color=shelter_color, alpha=.5)
else:
print('here!', knick_point)
def plot_point_cloud_negative_developement(ax3d, learning_progress, with_traj=True):
timeStart = 0
intSteps = 10 # integration Steps
dt=1
sim_time_step=np.linspace(timeStart,dt, intSteps)
k=25
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
knick_point=find_brown_knick_point(A, Y, S)
A,Y,S=correct_averaged_AYS( A,Y,S)
#backwater_point=find_backwater_point(S)
if learning_progress['S'][knick_point]>0.35 and knick_point >5:
#knick_point=np.min( np.argpartition(gradient_array, k)[0:k])
ax3d.scatter(xs=A[knick_point+k], ys=Y[knick_point+k], zs=S[knick_point+k],
alpha=1,lw=1, color='black' )
if with_traj:
# Part before knick
# plot_part_trajectory(ax3d, learning_progress['A'], learning_progress['Y'], learning_progress['S'], action, 0, knick_point+1)
plot_part_trajectory(ax3d, A,Y,S, action, 0, knick_point+k)
# Part up to end (requires constant management)
#plot_part_trajectory(ax3d, A,Y,S,action, knick_point+k, backwater_point+1)
#plot_part_trajectory(ax3d, A,Y,S,action, backwater_point, len(S))
plot_part_trajectory(ax3d, A,Y,S,action, knick_point+k, len(S))
# ax3d.plot3D(xs=learning_progress['A'], ys=learning_progress['Y'], zs=learning_progress['S'],
# alpha=alpha, lw=1)
else:
print('here!', knick_point)
def plot_3D_AYS_basins(learning_progress_green, learning_progress_brown, cut_shelter_image=False, num_traj=50,ax=None ):
if ax is None:
if cut_shelter_image:
fig, ax3d=create_extract_figure(Azimut=-160, plot_boundary=True)
else:
fig, ax3d=create_figure(Azimut=-160, )
else:
ax3d=ax
for i in range(0,num_traj):
if len(learning_progress_green) > i and len(learning_progress_brown) > i:
if cut_shelter_image:
# plot_point_cloud_positive_developement_averaged(ax3d, learning_progress=runs_survive_green[i],cut_shelter=cut_shelter )
plot_point_cloud_positive_developement(ax3d, learning_progress=learning_progress_green[i],cut_shelter=cut_shelter_image )
else:
plot_point_cloud_positive_developement(ax3d, learning_progress=learning_progress_green[i], )
plot_point_cloud_negative_developement(ax3d, learning_progress=learning_progress_brown[i], )
if ax is None:
if cut_shelter_image:
fig.savefig('./images/phase_space_plots/zoom3D_AYS_trajectory_many_paths.pdf')
else:
#plot_hairy_lines(200, ax3d)
fig.savefig('./images/phase_space_plots/3D_AYS_trajectory_many_paths.pdf')
def plot_knickPoints_2D(learning_progress_arr_1, learning_progress_arr_2, label=None, colors=['tab:green','black','#1f78b4'],
basins=[True,False], savepath='./images/phase_space_plots/Knick_points_2D.pdf'):
k=20
lst_FP_arr1=pd.DataFrame(columns=parameters)
lst_FP_arr2=pd.DataFrame(columns=parameters)
for idx, simulation in enumerate(learning_progress_arr_1):
learning_progress=simulation
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
if basins[0]:
knick_point_1=find_green_knick_point(A, Y, S)
else:
knick_point_1=find_brown_knick_point(A, Y, S)
tmp_data_1= pd.DataFrame(learning_progress.iloc[knick_point_1]).T
lst_FP_arr1=pd.concat([lst_FP_arr1, tmp_data_1]).reset_index(drop = True)
for idx, simulation in enumerate(learning_progress_arr_2):
learning_progress=simulation
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
if basins[1]:
knick_point_2=find_green_knick_point(A, Y, S)
else:
knick_point_2=find_brown_knick_point(A, Y, S)
tmp_data_2= pd.DataFrame(learning_progress.iloc[knick_point_2]).T
lst_FP_arr2=pd.concat([lst_FP_arr2, tmp_data_2]).reset_index(drop = True)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(14,4))
A_PB, Y_PB, S_PB= [0.5897435897435898, 0.36363636363636365, 0]
ax1=ax[0]
ax2=ax[1]
ax3=ax[2]
a_min,a_max=0.55,0.6
y_min,y_max=0.36,0.47
s_min,s_max=0.54,0.62
# AY
ax1.set_title('A-Y Plane')
ax1.set_xlabel('A [GtC]')
ax1.set_ylabel("Y [%1.0e USD/yr]"%Y_scale)
make_2d_ticks(ax1, boundaries=[[a_min,a_max], [y_min, y_max]], scale_1=A_scale, scale_2=Y_scale, x_mid_1=current_state[0], x_mid_2=current_state[1])
ax1.plot(lst_FP_arr1['A'], lst_FP_arr1['Y'], 'd', color=colors[0], fillstyle='none')
ax1.plot(lst_FP_arr2['A'], lst_FP_arr2['Y'], 'x', color=colors[1])
ax1.axvline(x=A_PB, color='red', linestyle='--')
ax1.axhline(y=Y_PB, color='red', linestyle='--')
# AS
ax2.set_title('A-S Plane')
ax2.set_xlabel('A [GtC]')
ax2.set_ylabel("S [%1.0e GJ]"%S_scale )
make_2d_ticks(ax2, boundaries=[[a_min,a_max], [s_min, s_max]], scale_1=A_scale, scale_2=S_scale, x_mid_1=current_state[0], x_mid_2=current_state[2])
ax2.plot(lst_FP_arr1['A'], lst_FP_arr1['S'], 'd', color=colors[0],fillstyle='none')
ax2.plot(lst_FP_arr2['A'], lst_FP_arr2['S'], 'x', color=colors[1])
ax2.axvline(x=A_PB, color='red', linestyle='--')
# YS
ax3.set_title('Y-S Plane')
ax3.set_xlabel("Y [%1.0e USD/yr]"%Y_scale)
ax3.set_ylabel("S [%1.0e GJ]"%S_scale )
make_2d_ticks(ax3, boundaries=[[y_min,y_max], [s_min, s_max]], scale_1=Y_scale, scale_2=S_scale, x_mid_1=current_state[1], x_mid_2=current_state[2])
ax3.plot(lst_FP_arr1['Y'], lst_FP_arr1['S'], 'd', color=colors[0],fillstyle='none')
ax3.plot(lst_FP_arr2['Y'], lst_FP_arr2['S'], 'x', color=colors[1])
ax3.axvline(x=Y_PB, color='red', linestyle='--', label='Boundaries')
ax3.legend(label, loc='center left', bbox_to_anchor=(1, .9), fontsize=14,fancybox=True, shadow=True )
fig.tight_layout()
fig.savefig(savepath)
def plot_action_3D_basins(learning_progress_arr, cut_shelter_image=False, num_plots=50,):
if cut_shelter_image:
fig, ax3d=create_extract_figure(Azimut=-160, plot_boundary=True, label=None, colors=None)
else:
fig, ax3d=create_figure(Azimut=-160,label=None, colors=None )
for idx, learning_progress in enumerate(learning_progress_arr):
for i in range(0,num_plots):
if len(learning_progress) > i:
k=15
A,Y,S=learning_progress[i]['A'],learning_progress[i]['Y'],learning_progress[i]['S']
if cut_shelter_image and learning_progress[i]['S'].iloc[-1]>0.9:
idx_cut_shelter=cut_shelter_traj(A,Y)
plot_action_trajectory(ax3d, learning_progress[i], 0, idx_cut_shelter)
else:
plot_action_trajectory(ax3d, learning_progress[i], 0, len(S))
if learning_progress[i]['S'].iloc[-1]>0.9:
plot_point_cloud_positive_developement(ax3d, learning_progress[i], cut_shelter_image, with_traj=False)
else:
plot_point_cloud_negative_developement(ax3d, learning_progress[i], with_traj=False)
if cut_shelter_image:
fig.savefig('./images/phase_space_plots/zoom3D_AYS_trajectory_actions_many_paths.pdf')
else:
#plot_hairy_lines(200, ax3d)
fig.savefig('./images/phase_space_plots/3D_AYS_trajectory_actions_many_paths.pdf')
def plot_averaged_3D_basins(learning_progress_arr, cut_shelter_image=False, num_plots=50,label_arr=None, color_arr=None, ax=None ):
if color_arr is None:
color_arr=['black', 'green']
if ax is None:
if cut_shelter_image:
fig, ax3d=create_extract_figure(Azimut=-160, plot_boundary=True, label=label_arr, colors=color_arr)
else:
fig, ax3d=create_figure(Azimut=-160,label=label_arr, colors=color_arr )
else:
ax3d=ax
for idx, learning_progress in enumerate(learning_progress_arr):
for i in range(0,num_plots):
if len(learning_progress) > i:
k=15
A,Y,S=correct_averaged_AYS( *get_averaged_AYS(learning_progress[i], k))
if cut_shelter_image and learning_progress[i]['S'].iloc[-1]>0.9:
idx_cut_shelter=cut_shelter_traj(A,Y)
ax3d.plot3D(xs=A[:idx_cut_shelter], ys=Y[:idx_cut_shelter], zs=S[:idx_cut_shelter],
color=color_arr[idx] , alpha=1., lw=1)
else:
ax3d.plot3D(xs=A, ys=Y, zs=S, color=color_arr[idx], alpha=1., lw=1)
if cut_shelter_image:
fig.savefig('./images/phase_space_plots/zoom3D_AYS_averaged_trajectory_many_paths.pdf')
else:
#plot_hairy_lines(200, ax3d)
fig.savefig('./images/phase_space_plots/3D_AYS_averaged_trajectory_many_paths.pdf')
return ax3d
def find_color(option):
if option in management_options:
idx=management_options.index(option)
return color_list[idx]
else:
print("ERROR! This management option does not exist: ", option)
sys.exit(1)
def plot_management_dynamics(ax, option=None):
if option is not None:
time = np.linspace(0, 300, 1000)
x0 = [0.5,0.5,0.5]
if option in management_options:
parameter_list=get_parameters(management_options.index(option))
print(option)
color=find_color(option)
traj = integ.odeint(ays.AYS_rescaled_rhs, x0, time, args=parameter_list[0])
ax.plot3D(xs=traj[:,0], ys=traj[:,1], zs=traj[:,2],
color=color)
def plot_example_figure(learning_progress_arr, cut_shelter=False, num_traj=0,num_hairs=300, ax=None, ticks=True,label=[], colors=[],
option=None,plot_traj=True, plot_boundary=True,
filename='./images/phase_space_plots/3D_AYS_example_trajectory.pdf', ):
learning_progress= learning_progress_arr[num_traj]
if ax is None:
if cut_shelter:
fig, ax3d=create_extract_figure(Azimut=-160, plot_boundary=plot_boundary, label=None, colors=None)
else:
if option=='DG':
Azimut=-177
Elevation=65
elif option=='ET':
Azimut=-88
Elevation=65
else:
Azimut=-167
Elevation=25
fig, ax3d=create_figure(Azimut=Azimut,Elevation=Elevation, label=label, colors=colors, ticks=ticks, plot_boundary=plot_boundary )
else:
ax3d=ax
if option:
plot_management_dynamics(ax3d, option)
# plot_management_dynamics(ax3d, 'ET')
else:
plot_hairy_lines(num_hairs, ax3d)
A,Y,S=learning_progress['A'],learning_progress['Y'],learning_progress['S']
if cut_shelter and learning_progress['S'].iloc[-1]>0.9:
idx_cut_shelter=cut_shelter_traj(A,Y)
plot_action_trajectory(ax3d, learning_progress, 0, idx_cut_shelter, lw=4)
else:
if plot_traj:
plot_action_trajectory(ax3d, learning_progress, 0, len(S),lw=4)
fig.savefig(filename)
return ax3d
def plot_learning_developement(learner_type='ddqn_per_is_duel', reward_type='survive', policy='epsilon_greedy', run_idx=0):
#episodes=[0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000,4500, 5000, 6000, 7000, 8000, 9000]
episodes=[0, 500, 1000, 1500, 2000, 2500, 3000, 5000, 7000, 8000, 9000]
nrows=3
ncols=3
fig= plt.figure( figsize=(12,12))
for i in range(0,nrows):
for j in range(0,ncols):
idx=i*ncols + j
episode=episodes[idx]
this_ax=fig.add_subplot(nrows, ncols, idx+1, projection='3d')
this_ax.set_title("Episode: " + str(episode))
this_ax.dist = 13
create_axis_3d(this_ax)
for basin in ['BROWN_FP', 'GREEN_FP', 'OUT_PB']:
learning_progress=read_one_trajectory(learner_type, reward_type, basin, policy, episode, run_idx)
#print(filepath)
if learning_progress is not None:
print("Path for " + basin+ " at episode: ", episode )
this_ax.plot3D(xs=learning_progress['A'], ys=learning_progress['Y'], zs=learning_progress['S'],
color='green' if learning_progress['S'].iloc[-1]>0.9 else 'black',
alpha=1., lw=1)
break
fig.tight_layout()
fig.savefig('./images/learning_success/learning_developement.pdf')
def management_distribution_part(learning_progress):
k=20
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
knick_point=find_green_knick_point(A, Y, S)
A,Y,S=learning_progress['A'],learning_progress['Y'],learning_progress['S']
learning_progress['Action']
idx_shelter=find_shelter_point(S)
if learning_progress['S'][knick_point]<0.65 and knick_point >5:
#knick_point=np.min( np.argpartition(gradient_array, k)[0:k])
my_actions= learning_progress['Action'][knick_point:idx_shelter]
weights = np.ones_like(my_actions)/float(len(my_actions))
my_actions.hist(bins=3, density=True, weights=weights)
plt.xlabel("Action number", fontsize=15)
plt.ylabel("Probability",fontsize=15)
plt.xlim([0,3])
else:
print('No knick point found!', knick_point)
def heat_map_knick_point(learner_type, reward_type='PB', label=None, colors=['tab:green','black','#1f78b4'],
basin='GREEN_FP', savepath='./images/phase_space_plots/Knick_points_heatmap.pdf'):
import matplotlib as mpl
import scipy.stats as st
a_min,a_max=0.56,0.595
y_min,y_max=0.36,0.47
s_min,s_max=0.54,0.62
k=20
lst_FP_arr1=pd.DataFrame(columns=parameters)
for episode in range(0,10000, 500):
learning_progress_arr=read_trajectories(learner_type=learner_type, reward_type=reward_type, basin=basin, policy='epsilon_greedy',
episode=episode)
for idx, simulation in enumerate(learning_progress_arr):
learning_progress=simulation
A,Y,S=get_averaged_AYS(learning_progress, k)
action=learning_progress['Action']
if basin=='GREEN_FP':
knick_point_green=find_green_knick_point(A, Y, S)
tmp_data_1= | pd.DataFrame(learning_progress.iloc[knick_point_green]) | pandas.DataFrame |
from datetime import date, timedelta
from . import data
import pandas as pd
import numpy as np
### Solvency Ratios
def debtToEquity(ticker, timeFrame):
years = list(data.getLongTermDebt(ticker, timeFrame).keys())
debt = list(data.getLongTermDebt(ticker, timeFrame).values())
equity = list(data.getShareholderEquity(ticker, timeFrame).values())
# Debt-to-equity = long-term debt / shareholder equity
debt_equity = np.divide(debt, equity)
d_e = [round(de, 2) for de in debt_equity]
return {years[i]: d_e[i] for i in range(len(d_e))}
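# Example (hypothetical ticker; 'A' = annual timeframe, as used by the data module elsewhere in this file):
#   debtToEquity('AAPL', 'A')  # -> {year: debt/equity ratio}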
def debtToAssets(ticker, timeFrame):
years = list(data.getLongTermDebt(ticker, timeFrame).keys())
debt = list(data.getLongTermDebt(ticker, timeFrame).values())
assets = list(data.getTotalAssets(ticker, timeFrame).values())
d_a = np.divide(debt, assets)
debt_assets = [round(de, 2) for de in d_a]
return {years[i]: debt_assets[i] for i in range(len(debt_assets))}
def equityToAssets(ticker, timeFrame):
years = list(data.getShareholderEquity(ticker, timeFrame).keys())
equity = list(data.getShareholderEquity(ticker, timeFrame).values())
assets = list(data.getTotalAssets(ticker, timeFrame).values())
e_a = np.divide(equity, assets)
equity_assets = [round(ea, 2) for ea in e_a]
return {years[i]: equity_assets[i] for i in range(len(equity_assets))}
def interestCoverage(ticker):
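# Interest coverage = EBITDA / interest expense; years and EBITDA are truncated below so they
# line up with the available interest entries.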
interest = list(data.getInterest(ticker).values())[1:]
years = list(data.getEBITDA(ticker, "A").keys())[0:len(interest)]
ebitda = list(data.getEBITDA(ticker, "A").values())[0:len(interest)]
ic = np.divide(ebitda, interest)
interest_cov = [round(i, 2) for i in ic]
return {years[i]: interest_cov[i] for i in range(len(interest_cov))}
def assetToLiab(stock, timeFrame):
total_assets = list(data.getTotalAssets(stock, timeFrame).values())
total_liab = list(data.getTotalLiab(stock, timeFrame).values())
asset_liab = [round(total_assets[i] / total_liab[i], 2) for i in range(len(total_assets))]
years = list(data.getTotalAssets(stock, timeFrame).keys())
final = dict(zip(years, asset_liab))
return final
# Profitability
def grossMargin(ticker, timeFrame):
years = list(data.getRevenue(ticker, timeFrame).keys())
grossProfit = list(data.getGrossProfit(ticker, timeFrame).values())
revenue = list(data.getRevenue(ticker, timeFrame).values())
margin = np.divide(grossProfit, revenue)
g_margin = [round(m * 100, 2) for m in margin]
return {years[i]: g_margin[i] for i in range(len(g_margin))}
def operatingMargin(ticker, timeFrame):
years = list(data.getRevenue(ticker, timeFrame).keys())
operatingProfit = list(data.getOperatingIncome(ticker, timeFrame).values())
revenue = list(data.getRevenue(ticker, timeFrame).values())
margin = np.divide(operatingProfit, revenue)
o_margin = [round(m * 100, 2) for m in margin]
return {years[i]: o_margin[i] for i in range(len(o_margin))}
def ebitMargin(ticker, timeFrame):
years = list(data.getRevenue(ticker, timeFrame).keys())
ebit = list(data.getEbit(ticker, timeFrame).values())
revenue = list(data.getRevenue(ticker, timeFrame).values())
margin = np.divide(ebit, revenue)
e_margin = [round(m * 100, 2) for m in margin]
return {years[i]: e_margin[i] for i in range(len(e_margin))}
def ebitdaMargin(ticker, timeFrame):
years = list(data.getRevenue(ticker, timeFrame).keys())
ebitda = list(data.getEBITDA(ticker, timeFrame).values())
revenue = list(data.getRevenue(ticker, timeFrame).values())
margin = np.divide(ebitda, revenue)
e_margin = [round(m * 100, 2) for m in margin]
return {years[i]: e_margin[i] for i in range(len(e_margin))}
def netMargin(ticker, timeFrame):
years = list(data.getRevenue(ticker, timeFrame).keys())
netProfit = list(data.getNetIncome(ticker, timeFrame).values())
revenue = list(data.getRevenue(ticker, timeFrame).values())
margin = np.divide(netProfit, revenue)
n_margin = [round(m * 100, 2) for m in margin]
return {years[i]: n_margin[i] for i in range(len(n_margin))}
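# A minimal, self-contained illustration of the {year: rounded value} pattern that the
# ratio helpers above all follow. The figures below are hypothetical sample values (not
# pulled from the data module) and this function is not called anywhere in the module.
def _example_ratio_pattern():
    years = ["2019", "2020", "2021"]
    debt = [50.0, 60.0, 55.0]        # hypothetical long-term debt
    equity = [100.0, 110.0, 120.0]   # hypothetical shareholder equity
    d_e = [round(r, 2) for r in np.divide(debt, equity)]
    return {years[i]: d_e[i] for i in range(len(d_e))}  # {'2019': 0.5, '2020': 0.55, '2021': 0.46}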
def revenueGrowth(ticker, timeFrame):
revenue = list(data.getRevenue(ticker, timeFrame).values())
years = list(data.getRevenue(ticker, timeFrame).keys())
rev = | pd.Series(revenue) | pandas.Series |
# Author: <NAME>
# Feel free to use, copy, distribute or modify the Python Script, licensed under MIT License.
# Please ensure to provide proper credit for the same.
import streamlit as st
import pandas as pd
from csv import DictWriter
from datetime import datetime
import states_data
st.header('India Fights Covid-19')
st.write("Let's save our families and friends together!")
st.write("")
st.info("Click the TOP LEFT BAR / PANE to view the options.")
states = states_data.states_data()
def state_data(key):
states_list = list(states.keys())
state_selection = st.selectbox('States & Union Territories', options=states_list, key=key)
district_lists = list(states[state_selection].keys())
district_selection = st.selectbox('District', options=district_lists, key=key)
cities = st.selectbox('Cities', options=list(states[state_selection][district_selection]), key=key)
return state_selection, district_selection, cities
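# A minimal sketch (illustration only, never called by the app) of the cascading lookup that
# state_data() performs on the nested states dict: state -> district -> city. The toy dict
# below is hypothetical; in the app the mapping comes from states_data(), and the `key`
# argument passed to each selectbox keeps repeated widget instances distinct for Streamlit.
def _example_state_lookup():
    toy_states = {"StateA": {"District1": ["CityX", "CityY"]}}
    state = next(iter(toy_states))
    district = next(iter(toy_states[state]))
    city = toy_states[state][district][0]
    return state, district, city  # ('StateA', 'District1', 'CityX')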
# 1. STATES IMPORTANT LINKS
st.write("---")
st.sidebar.subheader("Links & Helpline Number")
states_imp_links = {
"": "",
"National Links": {
"Links": {
"Cipla Med Access": "https://www.cipla.com/",
"Dr. Reddy's COVID-19 Med Access": "https://readytofightcovid.in/",
"Pan India Plasma Resources": "https://covidplasma.online/",
"COVID-19 Resources Citizen's Compiled Data- 1": "https://docs.google.com/spreadsheets/d/1mrlaZg8jvduKcxvCWs"
"-phAdltgBmY3lTOFTgH4-SLzY/edit#gid=2047572279",
"COVID-19 Resources Citizen's Compiled Data- 2": "https://docs.google.com/spreadsheets/d"
"/1fHiBtzxBC_3Q7I5SXr_GpNA4ivT73w4W4hjK6IkGDBY/edit#gid"
"=1482732175",
"COVID-19 Resources Citizen's Compiled Data- 3": "https://shubhamkarjos.github.io/WebDev/Covid/Covid-Help"
"/main.html "
},
"COVID Helpline Number": "+911123978046",
},
"Andaman & Nicobar Islands": {
# "Links": {
#
# },
"COVID Helpline Number": "03192232102",
},
"Andhra Pradesh": {
"Links": {
"COVID-19 AP": "http://dashboard.covid19.ap.gov.in/ims/hospbed_reports/",
},
"COVID Helpline Number": "08662410978",
},
"Arunachal Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "9436055743",
},
"Assam": {
# "Links": {
#
# },
"COVID Helpline Number": "6913347770",
},
"Bihar": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Chandigarh": {
# "Links": {
#
# },
"COVID Helpline Number": "9779558282",
},
"Chhattisgarh": {
"Links": {
"COVID-19 Chattisgarh": "https://cg.nic.in/health/covid19/RTPBedAvailable.aspx",
},
"COVID Helpline Number": "07712235091, 104",
},
"Dadra & Nagar Haveli & Daman & Diu": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Delhi": {
"Links": {
"COVID-19 Delhi": "https://coronabeds.jantasamvad.org/beds.html",
},
"COVID Helpline Number": "01122307145",
},
"Goa": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Gujarat": {
"Links": {
"COVID-19 GandhiNagar": "https://vmc.gov.in/HospitalModuleGMC/BedDetails.aspx?HOSP_ID=HOS00041",
"COVID-19 Vadodara": "https://vmc.gov.in/Covid19VadodaraApp/HospitalBedsDetails.aspx?tid=1",
"COVID-19 Resources Citizen's Compiled Data- 1": "https://docs.google.com/spreadsheets/d"
"/1ZyrYsowjk6PdC9N5yKBxMslI7FypoeIqDvlAYrqprL8/edit#gid=0 "
},
"COVID Helpline Number": "104",
},
"Haryana": {
"Links": {
"COVID-19 Haryana": "https://coronaharyana.in/",
},
"COVID Helpline Number": "8558893911",
},
"Himachal Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Jammu & Kashmir": {
# "Links": {
#
# },
"COVID Helpline Number": "01912520982",
},
"Jharkhand": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Karnataka": {
"Links": {
"COVID-19 Bangalore": "https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vS-ipQLaCHZ8id4t4_NHf1FM4vQmBGQrGHAPFzNzJeuuGKsY_It6Tdb0Un_bC9gmig5G2dVxlXHoaEp/pubhtml?gid=1381543057&single=true",
},
"COVID Helpline Number": "104",
},
"Kerala": {
# "Links": {
#
# },
"COVID Helpline Number": "04712552056",
},
"Ladakh": {
# "Links": {
#
# },
"COVID Helpline Number": "01982256462",
},
"Lakshadweep": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Madhya Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "07552527177",
},
"Maharashtra": {
"Links": {
"COVID-19 Nagpur": "http://nsscdcl.org/covidbeds/AvailableHospitals.jsp",
"COVID-19 Panvel": "https://covidbedpanvel.in/HospitalInfo/showindex",
"COVID-19 Pune": "https://covidpune.com/",
"COVID-19 UlhasNagar": "https://umccovidbed.in/HospitalInfo/showindex",
"COVID-19 Thane": "https://covidbedthane.in/HospitalInfo/showindex",
},
"COVID Helpline Number": "02026127394",
},
"Manipur": {
# "Links": {
#
# },
"COVID Helpline Number": "3852411668",
},
"Meghalaya": {
# "Links": {
#
# },
"COVID Helpline Number": "108",
},
"Mizoram": {
# "Links": {
#
# },
"COVID Helpline Number": "102",
},
"Nagaland": {
# "Links": {
#
# },
"COVID Helpline Number": "7005539653",
},
"Odisha (Orissa)": {
# "Links": {
#
# },
"COVID Helpline Number": "9439994859",
},
"Puducherry (Pondicherry)": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Punjab": {
"Links": {
"COVID-19 Ludhiana": "http://hbmsludhiana.in/index_app_detail.php?type=all",
}
},
"Rajasthan": {
"Links": {
"COVID-19 Rajasthan": "https://covidinfo.rajasthan.gov.in/covid-isolation-hospital.aspx",
},
"COVID Helpline Number": "01412225624",
},
"Sikkim": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Tamil Nadu": {
"Links": {
"COVID-19 TN": "https://stopcorona.tn.gov.in/beds.php",
},
"COVID Helpline Number": "04429510500",
},
"Telangana": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Tripura": {
# "Links": {
#
# },
"COVID Helpline Number": "03812315879",
},
"Uttarakhand (Uttaranchal)": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Uttar Pradesh": {
"Links": {
"COVID-19 Lucknow": "https://docs.google.com/spreadsheets/d/1roxOi2_Uw4YBzLd5s8vC8cp6lbuM9016tWeWTcx2q5Y"
"/edit#gid=0 "
},
"COVID Helpline Number": "18001805145",
},
"West Bengal": {
# "Links": {
#
# },
"COVID Helpline Number": "3323412600",
},
}
select_state = st.sidebar.selectbox("", list(states_imp_links.keys()))
st.write(states_imp_links[select_state])
st.sidebar.subheader("Offering or Required Assistance? ")
person_kind = st.sidebar.selectbox("", ["Please Select", "Providing Help!", "Need Your Help!"])
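# Every submission handler below builds a dict of field values and appends it to a CSV file
# with csv.DictWriter. A minimal, self-contained sketch of that pattern (the file name and
# field names here are hypothetical, and this helper is not called anywhere in the app):
def _example_append_row(csv_path="example_rows.csv"):
    from csv import DictWriter
    field_names = ["Name", "Created Time"]
    row = {"Name": "sample entry", "Created Time": datetime.now().strftime("%d/%m/%Y %H:%M:%S")}
    with open(csv_path, "a", newline="") as f_object:
        DictWriter(f_object, fieldnames=field_names).writerow(row)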
# 2. PROVIDING HELP
if person_kind == "Providing Help!":
st.write("------------")
st.write("Thank you for being a potential life saver.")
st.write("Please provide correct information to the best of your knowledge.")
st.write("")
st.subheader("Volunteer for or Add a Lead:")
requirement = st.selectbox("", ["Please Select", "Ambulance Services", "Child Care", "Home Visit",
"Hospital Beds", "Medicine", "Oxygen Cylinders", "Plasma", "Others"])
# 2.1 PROVIDING HELP: AMBULANCE SERVICES
if requirement == "Ambulance Services":
contact_person = st.text_input('Contact Person: ')
st.subheader("Contact Number: Format: 9876543210, PLEASE DO NOT PREFIX +91.")
contact_information = st.text_input('Contact Number: ')
st.write("---")
st.subheader("Provide Pickup Location: ")
pickup_location_state, pickup_location_district, pickup_location_city = state_data(key="provider_pickup")
st.write("---")
st.subheader("Provide Drop Location: ")
drop_location_state, drop_location_district, drop_location_city = state_data(key="provider_drop")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_drop")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"Pickup: State", "Pickup: District", "Pickup: City",
"Drop: State", "Drop: District", "Drop: City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Pickup: State": pickup_location_state,
"Pickup: District": pickup_location_district,
"Pickup: City": pickup_location_city,
"Drop: State": drop_location_state,
"Drop: District": drop_location_district,
"Drop: City": drop_location_city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./ambulance_service_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.2 PROVIDING HELP: CHILD CARE
elif requirement == "Child Care":
contact_person = st.text_input('Contact Person: ')
contact_information = st.text_input('Contact Number: ')
state, district, city = state_data("provider_child_care")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_child_care")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./child_care_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.3 PROVIDING HELP: HOME VISIT
elif requirement == "Home Visit":
contact_person = st.text_input('Contact Person: ')
contact_information = st.text_input('Enter Contact Number: ')
state, district, city = state_data("provider_home_visit")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_home_visit")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./home_visit_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.4 PROVIDING HELP: HOSPITAL BEDS
elif requirement == "Hospital Beds":
contact_person = st.text_input('Contact Name or Doctor Name: ')
contact_information = st.text_input('Mobile Number: ')
hospital_name = st.text_input('Hospital Name: ')
hospital_address = st.text_input('Hospital Address: ')
state, district, city = state_data("provider_hospital_beds")
total_bed_count = st.text_input("Total Bed Count: ")
oxygen_bed_count = st.text_input("Oxygen Bed Count: ")
icu_bed_count = st.text_input("ICU Bed Count: ")
ventilator_bed_count = st.text_input("Ventilator Bed Count: ")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_hospital_beds")
if submit_info:
field_names = ["Contact Person", "Contact Mobile Number", "Hospital Name", "Hospital Address",
"State", "District", "City", "Total Bed Count",
"Oxygen Bed Count", "ICU Bed Count", "Ventilator Bed Count", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Hospital Name": hospital_name,
"Hospital Address": hospital_address,
"State": state,
"District": district,
"City": city,
"Total Bed Count": total_bed_count,
"Oxygen Bed Count": oxygen_bed_count,
"ICU Bed Count": icu_bed_count,
"Ventilator Bed Count": ventilator_bed_count,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./hospital_bed_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.5 PROVIDING HELP: MEDICINES
elif requirement == "Medicine":
contact_person = st.text_input('Distributor / Retailer Name: ')
medicine_name = st.text_input('Medicine Name: ')
state, district, city = state_data(key="provider_medicine")
address = st.text_input('Distributor / Retailer Address: ')
contact_information = st.text_input('Contact Mobile Number: ')
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not address:
st.write("Please provide the necessary information."
" Distributor / Retailer Name and Address Info is necessary!")
else:
field_names = ["Distributor Name", "Medicine Name",
"State", "District", "City", "Address", "Contact Number", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Distributor Name": contact_person,
"Medicine Name": medicine_name,
"State": state,
"District": district,
"City": city,
"Address": address,
"Contact Number": contact_information,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./medicines_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.6 PROVIDING HELP: OXYGEN CYLINDERS
elif requirement == "Oxygen Cylinders":
contact_person = st.text_input('Contact Name: ')
contact_information = st.text_input('Contact Mobile Number: ')
just_refill = st.selectbox("Just refilling?", ["Yes", "No"])
start_timings = st.time_input('Start Timing: ')
end_timings = st.time_input('End Timing: ')
availability_for = st.selectbox('Availability for', ["Home", "Hospitals", "Home & Hospitals"])
address = st.text_input('Address: ')
state, district, city = state_data(key="provider_oxygen_cylinders")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information so that we can help together!"
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number", "Just Refill", "Start Timings", "End Timings",
"Availability for", "Address", "State", "District", "City", "Verified",
"Additional Notes", "Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Just Refill": just_refill,
"Start Timings": start_timings,
"End Timings": end_timings,
"Availability for": availability_for,
"Address": address,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./oxygen_cylinders_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.7 PROVIDING HELP: PLASMA
elif requirement == "Plasma":
contact_person = st.text_input('Donor Name: ')
contact_information = st.text_input('Donor Contact Number: ')
contact_age = st.text_input("Donor Age: ")
blood_group = st.selectbox('Patient Blood Group: ', ['Please Select', 'A+', 'A-', 'B+', 'B-', 'AB+',
'AB-', 'O+', 'O-', 'Bombay Blood Group'])
recovered_date = st.date_input('Enter the date of recovery: ')
state, district, city = state_data(key="provider_plasma")
donated_before = st.selectbox("Have you donated it before?", ["Yes", "No"])
if donated_before == "Yes":
last_donated_date = st.date_input("Last Donated Date: ")
else:
last_donated_date = ""
antibodies_test = st.selectbox("Tested for antibodies yet?", ["Yes", "No"])
medical_issues = st.text_input("Any chronic disease such as high B.P., Diabetes etc.: ")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_plasma")
if submit_info:
if not contact_person or not contact_information or not blood_group or not contact_age \
or not recovered_date or not donated_before:
st.write("Please provide the necessary information so that we can help together!"
" Donor Name, Mobile Number, Age, Blood Group, Recovered Date, "
"and Donated Before Info is necessary!")
else:
field_names = ["Donor Name", "Donor Contact Number",
"Donor Age", "Donor Blood Group", "Recovered Date",
"State", "District", "City", "Donated Before", "Last Donated Date",
"Antibodies Test", "Medical Issues", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Donor Name": contact_person,
"Donor Contact Number": contact_information,
"Donor Age": contact_age,
"Donor Blood Group": blood_group,
"Recovered Date": recovered_date,
"State": state,
"District": district,
"City": city,
"Donated Before": donated_before,
"Last Donated Date": last_donated_date,
"Antibodies Test": antibodies_test,
"Medical Issues": medical_issues,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./plasma_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.8 PROVIDING HELP: OTHERS
elif requirement == "Others":
text = st.text_input('Write others: ')
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_others")
if submit_info:
field_names = ["Text", "Verified",
"Created Time", "Updated Time"]
dict_data = {"Text": text,
"Verified": verified_lead,
"Created Time": created_time,
"Updated Time": updated_time}
with open('./others_service_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 3. NEED ASSISTANCE
elif person_kind == "Need Your Help!":
st.write("------------")
st.write("I'm trying my best to keep the webpage updated. Kindly please share with others so more data and "
"verified leads can be collected and the resources be made available to the needful people.")
requirement = st.selectbox("Need List", ["Ambulance Services", "Child Care", "Home Visit",
"Hospital Beds", "Medicine", "Oxygen Cylinders", "Plasma", "Others"])
# 3.1 ASSISTANCE: AMBULANCE SERVICES / HOSPITAL BED / PLASMA
if requirement == "Ambulance Services" or requirement == "Hospital Beds" or requirement == "Plasma" \
or requirement == "Oxygen Cylinders":
patient_name = st.text_input('Patient Name: ')
contact_information = st.text_input('Patient Mobile Number: ')
patient_age = st.text_input('Patient Age: ')
patient_sex = st.selectbox('Patient Sex: ', ['Male', 'Female', 'Transgender'])
patient_condition = st.selectbox('Patient Condition: ', ['Stable', 'SOS'])
assistance_for = st.selectbox("Assistance For: ", ["Ambulance Services", "Hospital Beds", "Oxygen Cylinder",
"Oxygen Cylinder Refill", "Plasma"])
if assistance_for == "Ambulance Services":
facilities = st.selectbox("Facilities: ", ["Normal", "Oxygen without AC", "Oxygen with AC", "Ventilator"])
else:
facilities = ""
patient_blood_group = st.selectbox('Patient Blood Group: ', ['Please Select', 'A+', 'A-', 'B+', 'B-', 'AB+',
'AB-', 'O+', 'O-', 'Bombay Blood Group'])
if assistance_for == "Hospital Beds":
            bed_type = st.selectbox("Bed Type: ", ["Without Oxygen", "With Oxygen", "Ventilator Bed"])
else:
bed_type = ""
patient_oxygen_level = st.text_input("Patient Oxygen Level")
state, district, city = state_data(key="assist_ambulance")
address = st.text_input('Patient Address: ')
additional_notes = st.text_input('Additional Notes: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not patient_name or not contact_information or not assistance_for:
st.write("Please provide the necessary information."
" Patient Name, Mobile Number and Assistance For Info is necessary!")
else:
field_names = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "Facilities", "Bed Type", "Patient Blood Group", "Patient Oxygen Level",
"State", "District", "City", "Address", "Additional Notes", "Status",
"Created Time", "Updated Time"]
dict_data = {"Patient Name": patient_name,
"Patient Mobile Number": contact_information,
"Patient Age": patient_age,
"Patient Sex": patient_sex,
"Patient Condition": patient_condition,
"Assistance For": assistance_for,
"Facilities": facilities,
"Bed Type": bed_type,
"Patient Blood Group": patient_blood_group,
"Patient Oxygen Level": patient_oxygen_level,
"State": state,
"District": district,
"City": city,
"Address": address,
"Additional Notes": additional_notes,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./critical_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 3.2 ASSISTANCE: MEDICINE
elif requirement == "Medicine":
state = list(state_data(key="medicine_assistance"))
df = pd.read_csv("./medicines_provider.csv")
state_retailers_data = df[df["State"] == state[0]]
st.table(state_retailers_data)
# for iterate in range(retailers_count[0]):
# retailer_data = state_retailers_data.iloc[iterate, :]
# data_to_df = pd.DataFrame(retailer_data, columns=[0])
# retailer_info = data_to_df.dropna()
# st.write(retailer_info)
# 3.3 ASSISTANCE: HOME VISIT / CHILD CARE
elif requirement == "Home Visit" or requirement == "Child Care":
contact_person = st.text_input('Patient Name: ')
contact_information = st.text_input('Patient Mobile Number: ')
patient_age = st.text_input('Patient Age: ')
patient_sex = st.selectbox('Patient Sex: ', ['Male', 'Female', 'Transgender'])
patient_condition = st.selectbox('Patient Condition: ', ['Stable', 'SOS'])
assistance_for = st.selectbox("Assistance For: ", ["Home Visit", "Child Care"])
state, district, city = state_data(key="assist_home_visit")
address = st.text_input('Patient Address: ')
additional_notes = st.text_input('Additional Notes: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not contact_information or not assistance_for:
st.write("Please provide the necessary information."
" Patient Name, Mobile Number and Assistance For Info is necessary!")
else:
field_names = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "State", "District", "City", "Address", "Additional Notes", "Status",
"Created Time", "Updated Time"]
dict_data = {"Patient Name": contact_person,
"Patient Mobile Number": contact_information,
"Patient Age": patient_age,
"Patient Sex": patient_sex,
"Patient Condition": patient_condition,
"Assistance For": assistance_for,
"State": state,
"District": district,
"City": city,
"Address": address,
"Additional Notes": additional_notes,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./home_personal_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 2.4 ASSISTANCE: OTHERS
elif requirement == "Others":
text = st.text_input('Write others: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
field_names = ["Text", "Status"
"Created Time", "Updated Time"]
dict_data = {"Text": text,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time}
with open('./others_service_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 4. UPDATE THE DATA
st.sidebar.subheader("Update the Data!")
data_type = st.sidebar.selectbox("", ["Please Select",
"Need Assistance: Ambulance Services", "Providing Assistance: Ambulance Services",
"Need Assistance: Child Care", "Providing Assistance: Child Care",
"Need Assistance: Home Visit", "Providing Assistance: Home Visit",
"Need Assistance: Hospital Beds", "Providing Assistance: Hospital Beds",
"Providing Assistance: Medicine",
"Need Assistance: Oxygen Cylinders", "Providing Assistance: Oxygen Cylinders",
"Need Assistance: Plasma", "Providing Assistance: Plasma",
])
# 4.1 UPDATE: NEED ASSISTANCE: HOME VISIT AND CHILD CARE
if data_type == "Need Assistance: Home Visit" or data_type == "Need Assistance: Child Care":
df = | pd.read_csv("./home_personal_assistance.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn import plotting
class SubjectAnalyzer:
def __init__(self,subject_nii_path,mean_nii_path,sd_nii_path,atlas_nii_path):
'''Get paths for files'''
self.subject_nii_path = subject_nii_path
self.mean_nii_path = mean_nii_path
self.sd_nii_path = sd_nii_path
self.atlas_nii_path = atlas_nii_path
# Read nii images:
self.load_data()
# If data is OK, continue to analysis:
if self.is_data_proper:
self.calculate_zscore() # Calculate voxel z-scores
self.calculate_atlas_results() # Calculate atlas areas mean values and z-scores
else: # If data dimensions do not fit, output an error message detailing the error
self.error_message = \
"The following inputs: {}{}{}have an inconsistent have a dimension mismatch with the subject".format(
'mean map, ' if not self.is_mean_proper else '',
'st. dev. map, ' if not self.is_sd_proper else '',
'atlas, ' if not self.is_atlas_proper else '')
def load_data(self):
# Load nifti data of subject, mean and sd of "population" and atlas:
self.subject_img = nib.load(self.subject_nii_path)
self.mean_img = nib.load(self.mean_nii_path)
self.sd_img = nib.load(self.sd_nii_path)
self.atlas_img = nib.load(self.atlas_nii_path)
self.shape = self.subject_img.shape # get dimensions of subject's data
self.is_mean_proper = self.mean_img.shape == self.shape # test that the mean data is the same shape
self.is_sd_proper = self.sd_img.shape == self.shape # test that the sd data is the same shape
self.is_atlas_proper = self.atlas_img.shape == self.shape # test that the atlas data is the same shape
# set is_data_proper to false if one of the inputs is not in the same dimensions as the subject
self.is_data_proper = self.is_mean_proper and self.is_sd_proper and self.is_atlas_proper
self.subject_data = self.subject_img.get_data() # get subject data from image
self.mean_data = self.mean_img.get_data() # get mean data from image
self.sd_data = self.sd_img.get_data() # get SD data from image
self.atlas_data = self.atlas_img.get_data() # get atlas data from image
# set zeros values to nan for subject, mean and sd data
self.subject_data[self.subject_data==0] = np.nan
self.mean_data[self.mean_data == 0] = np.nan
self.sd_data[self.sd_data == 0] = np.nan
def calculate_zscore(self):
'''
calculates the zscore for each subject voxel based on the control mean and sd
finds only significant voxels and saves them as "zs.nii.gz"
'''
self.zscores = (self.subject_data - self.mean_data) / self.sd_data # calculate zscores
zscores = self.zscores
        zscores[np.isnan(zscores)] = 0  # temporarily replace NaNs with zeros
        self.significant_zscores = np.where(np.abs(zscores) <= 1.96, np.nan, zscores)  # replace non-significant values with NaN in a new variable
self.significant_zscores_nii = nib.Nifti1Image(self.significant_zscores,self.subject_img.affine) # creates nifti template
nib.save(self.significant_zscores_nii, 'zs.nii.gz') # save nifti template
zs_nii_path = self.significant_zscores_nii
plotting.plot_glass_brain(zs_nii_path, threshold=1.96, colorbar=True, plot_abs=False,
output_file='Z_map.png',vmax=5)
def calculate_atlas_results(self):
'''
for each area in the atlas supplied, calculate the average value and z-score
'''
vals = np.zeros(self.atlas_data.max()) # initialize values array
zs = np.zeros(self.atlas_data.max()) # initialize zscores array
for i in range(1,self.atlas_data.max()+1): # for every area
vals[i-1] = np.nanmean(self.subject_data[self.atlas_data == i]) # calculate mean value in area
zs[i-1] = np.nanmean(self.zscores[self.atlas_data == i]) # calculate mean z-score in area
vals = pd.Series(vals,index = np.arange(1,self.atlas_data.max()+1)) # create values series
zs_s = pd.Series(zs,index = np.arange(1,self.atlas_data.max()+1)) # create zscore series
self.area_data = | pd.DataFrame({'Values': vals, 'Z-scores': zs_s}) | pandas.DataFrame |
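# A small numpy-only sketch of the voxelwise z-scoring and |z| > 1.96 thresholding performed
# by SubjectAnalyzer above, using random arrays in place of the subject/mean/SD images
# (illustration only; nothing in the class calls this).
def _example_voxel_zscore():
    rng = np.random.default_rng(0)
    subject = rng.normal(0.0, 1.0, (4, 4, 4))  # stand-in for the subject image
    mean = np.zeros((4, 4, 4))                 # stand-in for the population mean
    sd = np.ones((4, 4, 4))                    # stand-in for the population SD
    z = (subject - mean) / sd
    return np.where(np.abs(z) <= 1.96, np.nan, z)  # keep only the significant voxels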
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import pandas as pd
def cleanup_moissi(file_name):
if platform.system() == 'Darwin' or platform.system() == 'Linux':
source_projects = os.getcwd() + '/' + file_name
else:
source_projects = os.getcwd() + '\\' + file_name
source_projects = pd.read_csv(source_projects)
source_projects['Type'] = 'MoISSI'
source_projects = source_projects[source_projects['git_url'] != '?']
return source_projects
def cleanup_se(file_name):
if platform.system() == 'Darwin' or platform.system() == 'Linux':
source_projects = os.getcwd() + '/' + file_name
else:
source_projects = os.getcwd() + '\\' + file_name
source_projects = pd.read_csv(source_projects)
source_projects['Type'] = 'SE'
return source_projects
number_of_samples = 100
starting_index = 14
for i in range(starting_index, number_of_samples+starting_index):
se_df = cleanup_se('Samplings_with_mined_data/' + 'se_projects_' + str(i) + '_with_other_attributes.csv')
moissi_df = cleanup_moissi('moissi_projects_with_other_attributes.csv')
combined_df = | pd.concat([moissi_df, se_df], axis=0) | pandas.concat |
import os
import tensorflow as tf  # needed by show_camera_image (tf.image.decode_jpeg)
import math
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
from matplotlib.lines import Line2D
import matplotlib.patheffects as path_effects
import matplotlib as mpl
import cv2
import glob
from scipy.interpolate import CubicSpline
import scipy.interpolate
from scipy import signal
import scipy.stats as stats
import seaborn as sns
from sklearn.linear_model import LinearRegression
from gekko import GEKKO
import pywt
# import waymo dataset related modules
from waymo_open_dataset import dataset_pb2 as open_dataset
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
def show_camera_image(camera_image, camera_labels, layout, cmap=None):
    # Show a camera image and the given camera labels (if available)
ax = plt.subplot(*layout)
# Draw the camera labels.
for camera_labels in frame.camera_labels:
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name != camera_image.name:
continue
# Iterate over the individual labels.
for label in camera_labels.labels:
# Draw the object bounding box.
ax.add_patch(patches.Rectangle(
xy=(label.box.center_x - 0.5 * label.box.length,
label.box.center_y - 0.5 * label.box.width),
width=label.box.length,
height=label.box.width,
linewidth=1,
edgecolor='red',
facecolor='none'))
# Show the camera image.
frame_image = plt.imshow(tf.image.decode_jpeg(camera_image.image), cmap=cmap, animated=True)
plt.title(open_dataset.CameraName.Name.Name(camera_image.name))
plt.grid(False)
plt.axis('off')
return frame_image
def camera_video_generation():
img_array = []
for num in range(1, len(os.listdir('figure_save/temp_cam_pic/')) + 1):
image_filename = 'figure_save/temp_cam_pic/' + 'frame_' + str(num) + '.jpg'
img = cv2.imread(image_filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
video_save_name = 'figure_save/cam_video/' + 'camera_video_segment_' + str(segment_id) + '.avi'
out = cv2.VideoWriter(video_save_name, cv2.VideoWriter_fourcc(*'DIVX'), 10, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
print('camera video made success')
# after making the video, delete all the frame jpgs
filelist = glob.glob(os.path.join('figure_save/temp_cam_pic/', "*.jpg"))
for f in filelist:
os.remove(f)
def frame_context_update(frame_in):
# collect environment context in this frame
frame_context_dict = {}
frame_context_dict['segment_id'] = segment_id
frame_context_dict['frame_label'] = frame_label
frame_context_dict['time_of_day'] = frame_in.context.stats.time_of_day
frame_context_dict['location'] = frame_in.context.stats.location
frame_context_dict['weather'] = frame_in.context.stats.weather
for count in frame_in.context.stats.laser_object_counts:
if count.type != 1: # note that 1 means vehicle object
continue
frame_context_dict['laser_veh_count'] = count.count
return frame_context_dict
def collect_lidar_veh_label(single_lidar_label, row_dict, ego_dict, ego_pose):
# this function extract the information of a single object (Lidar label)
# note that the original position and heading in label is in local coordinate
# single_lidar_label is from lidar label from original data
# row_dict is an initialized dictionary that will be filled
# global unique object_id
row_dict['obj_id'] = single_lidar_label.id
row_dict['local_center_x'] = single_lidar_label.box.center_x
row_dict['local_center_y'] = single_lidar_label.box.center_y
row_dict['local_center_z'] = single_lidar_label.box.center_z
# we need to use ego_dict and ego_pose to transform local label position to global position
# (in vehicle frame), it needs to be transformed to global frame
# make ego_pose in the form of transformation matrix
trans_matrix = np.reshape(np.array(ego_pose), (4, 4))
# print(trans_matrix)
local_pos_matrix = np.reshape(
np.array([row_dict['local_center_x'], row_dict['local_center_y'], row_dict['local_center_z'], 1]), (4, 1))
# print(local_pos_matrix)
label_global_pos = np.matmul(trans_matrix, local_pos_matrix)
# print(label_global_pos)
row_dict['global_center_x'] = label_global_pos[0][0]
row_dict['global_center_y'] = label_global_pos[1][0]
row_dict['global_center_z'] = label_global_pos[2][0]
row_dict['length'] = single_lidar_label.box.length
row_dict['width'] = single_lidar_label.box.width
row_dict['height'] = single_lidar_label.box.height
frame_ego_heading = ego_dict['heading']
row_dict['heading'] = single_lidar_label.box.heading + frame_ego_heading
row_dict['speed_x'] = single_lidar_label.metadata.speed_x
row_dict['speed_y'] = single_lidar_label.metadata.speed_y
row_dict['accel_x'] = single_lidar_label.metadata.accel_x
row_dict['accel_y'] = single_lidar_label.metadata.accel_y
# angular speed remains to be calculated
row_dict['angular_speed'] = 0
return row_dict
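# A minimal numpy illustration of the local-to-global conversion used in collect_lidar_veh_label():
# a 4x4 homogeneous pose matrix applied to a local box center. The pose below (a 90-degree yaw
# plus a (10, 5, 0) translation) is made up for the example; the function is never called.
def _example_local_to_global():
    trans_matrix = np.array([[0.0, -1.0, 0.0, 10.0],
                             [1.0, 0.0, 0.0, 5.0],
                             [0.0, 0.0, 1.0, 0.0],
                             [0.0, 0.0, 0.0, 1.0]])
    local_point = np.array([[2.0], [0.0], [0.0], [1.0]])  # homogeneous local center
    global_point = np.matmul(trans_matrix, local_point)
    return global_point[:3, 0]  # array([10., 7., 0.])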
def veh_trj_collect(frame_in):
    # this function collects all Lidar object information in the current frame
# collect environment context in this frame
frame_context_dict = frame_context_update(frame)
# print(frame_context_dict)
ego_row_dict = frame_context_dict.copy() # add context info to every row
# collect ego (AV) vehicle's timestamp, position and speed
ego_row_dict['obj_type'] = 'vehicle'
ego_row_dict['obj_id'] = 'ego'
ego_row_dict['global_time_stamp'] = frame_in.timestamp_micros # unix time (in micro seconds)
# time referenced to segment start time
ego_row_dict['local_time_stamp'] = (frame_in.timestamp_micros - segment_start_time) / float(1000000) # in seconds
# self driving car's (sdc) global position and heading (yaw, pitch, roll)
sdc_pose = frame_in.pose.transform # the transformation matrix
# print(sdc_pose)
frame_images = frame_in.images
for image in frame_images:
# print(image.velocity)
ego_speed_x = image.velocity.v_x
ego_speed_y = image.velocity.v_y
ego_angular_speed = image.velocity.w_z
# only get speed from the front camera
break
# print(image.pose)
# ego_velocity = frame_in.images
# ego vehicle's local position will be 0, because itself is the origin
ego_row_dict['local_center_x'] = 0
ego_row_dict['local_center_y'] = 0
ego_row_dict['local_center_z'] = 0
# ego vehicle's global position is extracted from the transformation matrix
ego_row_dict['global_center_x'] = sdc_pose[3]
ego_row_dict['global_center_y'] = sdc_pose[7]
ego_row_dict['global_center_z'] = sdc_pose[11]
# note that the actual model of AV is 2019 Chrysler Pacifica Hybrid
    # the dimensions of the AV are length 5.18 m, width 2.03 m, height 1.78 m
ego_row_dict['length'] = 5.18
ego_row_dict['width'] = 2.03
ego_row_dict['height'] = 1.78
ego_row_dict['heading'] = math.atan2(sdc_pose[4], sdc_pose[0])
ego_row_dict['speed_x'] = ego_speed_x
ego_row_dict['speed_y'] = ego_speed_y
# accleration remains to be calculated
ego_row_dict['accel_x'] = 0
ego_row_dict['accel_y'] = 0
ego_row_dict['angular_speed'] = ego_angular_speed
# print(ego_row_dict)
# add to final file
all_segment_all_frame_all_object_info.append(ego_row_dict)
# collect vehicle's info in the lidar label
for lidar_label in frame_in.laser_labels:
        # label object type and its corresponding enum value
# TYPE_UNKNOWN = 0;
# TYPE_VEHICLE = 1;
# TYPE_PEDESTRIAN = 2;
# TYPE_SIGN = 3;
# TYPE_CYCLIST = 4;
if lidar_label.type in [1, 2, 4]:
temp_row_dict = ego_row_dict.copy()
if lidar_label.type == 1:
temp_row_dict['obj_type'] = 'vehicle'
elif lidar_label.type == 4:
temp_row_dict['obj_type'] = 'bicycle'
else:
temp_row_dict['obj_type'] = 'pedestrian'
temp_row_dict = collect_lidar_veh_label(lidar_label, temp_row_dict, ego_row_dict, sdc_pose)
# add to final file
all_segment_all_frame_all_object_info.append(temp_row_dict)
def final_trj_result_format():
# format the final output
global all_segment_all_frame_all_object_info_pd
all_segment_all_frame_all_object_info_pd['local_time_stamp'] = all_segment_all_frame_all_object_info_pd[
'local_time_stamp'].map('{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_x'] = all_segment_all_frame_all_object_info_pd[
'local_center_x'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_y'] = all_segment_all_frame_all_object_info_pd[
'local_center_y'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_z'] = all_segment_all_frame_all_object_info_pd[
'local_center_z'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_x'] = all_segment_all_frame_all_object_info_pd[
'global_center_x'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_y'] = all_segment_all_frame_all_object_info_pd[
'global_center_y'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_z'] = all_segment_all_frame_all_object_info_pd[
'global_center_z'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['length'] = all_segment_all_frame_all_object_info_pd['length'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['width'] = all_segment_all_frame_all_object_info_pd['width'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['height'] = all_segment_all_frame_all_object_info_pd['height'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['heading'] = all_segment_all_frame_all_object_info_pd['heading'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['speed_x'] = all_segment_all_frame_all_object_info_pd['speed_x'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['speed_y'] = all_segment_all_frame_all_object_info_pd['speed_y'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['accel_x'] = all_segment_all_frame_all_object_info_pd['accel_x'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['accel_y'] = all_segment_all_frame_all_object_info_pd['accel_y'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['angular_speed'] = all_segment_all_frame_all_object_info_pd[
'angular_speed'].map('{:.4f}'.format)
def plot_top_view_ani_with_lidar_label(trj_in, seg_id_in, frame_id_in):
# this function plots one single frame of the top view video
    # trj_in is a pandas DataFrame with columns (obj_id, frame_label, local_time_stamp, global_center_x, global_center_y, length, width, heading)
# trj_in is all the trajectories within one segment
# seg_id_in is the current segment id
    trj_in['global_center_x'] = trj_in['global_center_x'] - trj_in['global_center_x'].min()  # shift the coordinate origin to the trajectory minimum
trj_in['global_center_y'] = trj_in['global_center_y'] - trj_in['global_center_y'].min()
unique_veh_id = pd.unique(trj_in['obj_id'])
    plt.figure(figsize=(18, 13.5))
plt.xlabel('global center x (m)', fontsize=10)
plt.ylabel('global center y (m)', fontsize=10)
plt.axis('square')
plt.xlim([trj_in['global_center_x'].min() - 1, trj_in['global_center_x'].max() + 1])
plt.ylim([trj_in['global_center_y'].min() - 1, trj_in['global_center_y'].max() + 1])
# max_range = max(trj_in['global_center_x'].max(), )
title_name = 'Segment ' + str(seg_id_in)
plt.title(title_name, loc='left')
plt.xticks(
np.arange(round(float(trj_in['global_center_x'].min())), round(float(trj_in['global_center_x'].max())), 10),
fontsize=5)
plt.yticks(
np.arange(round(float(trj_in['global_center_y'].min())), round(float(trj_in['global_center_y'].max())), 10),
fontsize=5)
ax = plt.gca()
# find out the global heading of ego vehicle first, use it to transform other vehicles' local heading to global heading
ego_veh_trj = trj_in.loc[trj_in['obj_id'] == 'ego', :]
    ego_current_heading = ego_veh_trj.loc[ego_veh_trj['frame_label'] == frame_id_in, 'heading'].values[0]
# get all the trajectories until current frame
for signle_veh_id in unique_veh_id:
single_veh_trj = trj_in[trj_in['obj_id'] == signle_veh_id]
# print(single_veh_trj)
single_veh_trj = single_veh_trj[single_veh_trj['frame_label'] == frame_id_in]
# print(single_veh_trj)
if len(single_veh_trj) > 0:
ts = ax.transData
coords = [single_veh_trj['global_center_x'].iloc[0], single_veh_trj['global_center_y'].iloc[0]]
if single_veh_trj.iloc[0, 0] == 'ego':
veh_local_id = 0
temp_facecolor = 'red'
temp_alpha = 0.99
heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
                tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle)  # rotate the vehicle rectangle around its center by the heading angle
else:
# calculate vehicle's local id
veh_id_match_temp = veh_name_id_match[veh_name_id_match['obj_id'] == signle_veh_id]
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
# only vehicle has a local id
veh_local_id = veh_id_match_temp['local_id'].iloc[0]
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
temp_facecolor = 'blue'
elif single_veh_trj['obj_type'].iloc[0] == 'bicycle':
temp_facecolor = 'green'
else:
temp_facecolor = 'magenta'
temp_alpha = 0.5
heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
# transform for other vehicles, note that the ego global heading should be added to current local heading
tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle)
t = tr + ts
# note that exact xy needs to to calculated
veh_length = single_veh_trj['length'].iloc[0]
veh_width = single_veh_trj['width'].iloc[0]
ax.add_patch(patches.Rectangle(
xy=(single_veh_trj['global_center_x'].iloc[0] - 0.5 * veh_length,
single_veh_trj['global_center_y'].iloc[0] - 0.5 * veh_width),
width=veh_length,
height=veh_width,
linewidth=0.1,
facecolor=temp_facecolor,
edgecolor='black',
alpha=temp_alpha,
transform=t))
# add vehicle local id for only vehicle object
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
temp_text = plt.text(single_veh_trj['global_center_x'].iloc[0],
single_veh_trj['global_center_y'].iloc[0], str(veh_local_id), style='italic',
weight='heavy', ha='center', va='center', color='white', rotation=heading_angle,
size=3)
temp_text.set_path_effects(
[path_effects.Stroke(linewidth=1, foreground='black'), path_effects.Normal()])
trj_save_name = 'figure_save/temp_top_view_figure/top_view_segment_' + str(seg_id_in) + '_frame_' + str(
frame_id_in) + '_trajectory.jpg'
plt.savefig(trj_save_name, dpi=600)
plt.close('all')
def top_view_video_generation():
# this function generates one top view video based on top view figures from one segment
img_array = []
for num in range(1, len(os.listdir('figure_save/temp_top_view_figure/')) + 1):
image_filename = 'figure_save/temp_top_view_figure/' + 'top_view_segment_' + str(
single_seg_id) + '_frame_' + str(num) + '_trajectory.jpg'
img = cv2.imread(image_filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
video_save_name = 'figure_save/top_view_video/' + 'animation_top_view_segment_' + str(single_seg_id) + '.avi'
out = cv2.VideoWriter(video_save_name, cv2.VideoWriter_fourcc(*'DIVX'), 10, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
print('top view video made success')
# after making the video, delete all the frame jpgs
filelist = glob.glob(os.path.join('figure_save/temp_top_view_figure/', "*.jpg"))
for f in filelist:
os.remove(f)
def cumulated_dis_cal(coord_series_in, segment_id_in, veh_id_in, start_time_in):
# this function calculate the cumulated distance based on the given global coordinates,
# input coord_series_in: ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']
# output coord_series_in: ['global_center_x', 'global_center_y', 'speed_x', 'speed_y', 'cumu_dis', 'speed', 'accer', 'filter_cumu_dis',
# 'filter_speed', 'filter_accer', 'speed_based_cumu_dis', 'speed_based_speed', 'speed_based_accer', 'speed_based_filter_cumu_dis',
# 'speed_based_filter_speed', 'speed_based_accer']
coord_series_in.reset_index(drop=True, inplace=True)
coord_series_in.loc[:, 'cumu_dis'] = float(0)
coord_series_in.loc[:, 'speed'] = float(0)
coord_series_in.loc[:, 'accer'] = float(0)
coord_series_in.loc[:, 'speed_based_cumu_dis'] = float(0)
coord_series_in.loc[:, 'speed_based_speed'] = float(0)
coord_series_in.loc[:, 'speed_based_accer'] = float(0)
coord_series_in.loc[:, 'speed_based_jerk'] = float(0)
# calculate distance for position based method, and speed for speed based method
for i in range(1, len(coord_series_in['global_center_x'])):
pre_x = coord_series_in['global_center_x'].iloc[i - 1]
pre_y = coord_series_in['global_center_y'].iloc[i - 1]
post_x = coord_series_in['global_center_x'].iloc[i]
post_y = coord_series_in['global_center_y'].iloc[i]
single_dis = math.sqrt((post_x - pre_x) ** 2 + (post_y - pre_y) ** 2)
coord_series_in.loc[i, 'cumu_dis'] = coord_series_in.loc[i - 1, 'cumu_dis'] + single_dis
for i in range(len(coord_series_in['global_center_x'])):
speed_based_speed = math.sqrt((coord_series_in.at[i, 'speed_x']) ** 2 + (coord_series_in.at[i, 'speed_y']) ** 2)
coord_series_in.loc[i, 'speed_based_speed'] = speed_based_speed
    # calculate speed and acceleration for the position-based method, and distance and acceleration for the speed-based method
coord_series_in = update_speed_and_accer(coord_series_in, 0)
coord_series_in = speed_based_update_distance_and_accer(coord_series_in)
# trajectory correctness
# initialize filter_value
coord_series_in.loc[:, 'filter_cumu_dis'] = coord_series_in.loc[:, 'cumu_dis'].to_numpy()
coord_series_in.loc[:, 'filter_speed'] = coord_series_in.loc[:, 'speed'].to_numpy()
coord_series_in.loc[:, 'filter_accer'] = coord_series_in.loc[:, 'accer'].to_numpy()
coord_series_in.loc[:, 'filter_jerk'] = 0
coord_series_in = trajectory_correctness(coord_series_in, segment_id_in, veh_id_in, start_time_in)
return coord_series_in
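# A compact numpy version of the position-based cumulative distance computed above: Euclidean
# step lengths between consecutive global x/y points, then a running sum. The coordinates are
# toy values and this helper is not used by the processing pipeline.
def _example_cumulative_distance():
    x = np.array([0.0, 3.0, 6.0])
    y = np.array([0.0, 4.0, 8.0])
    step_dis = np.hypot(np.diff(x), np.diff(y))          # [5.0, 5.0]
    return np.concatenate(([0.0], np.cumsum(step_dis)))  # [0.0, 5.0, 10.0]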
def speed_based_update_distance_and_accer(series_in):
# this function calculate the distance, acceleration and jerk based on speed (for speed-based data)
# series_in is the same format as coord_series_in
# output is series_in with updated speed and accer
current_cumu_dis = 'speed_based_cumu_dis'
current_speed = 'speed_based_speed'
current_accer = 'speed_based_accer'
for i in range(1, len(series_in['global_center_x'])):
if i == 1:
series_in.loc[0, current_cumu_dis] = 0
series_in.loc[i, current_cumu_dis] = series_in.loc[i - 1, current_cumu_dis] + (
series_in.loc[i, current_speed] + series_in.loc[i - 1, current_speed]) * 0.5 * 0.1
else:
series_in.loc[i, current_cumu_dis] = series_in.loc[i - 1, current_cumu_dis] + (
series_in.loc[i, current_speed] + series_in.loc[i - 1, current_speed]) * 0.5 * 0.1
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
current_jerk = 'speed_based_jerk'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
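# The speed-based distance above is a trapezoidal integration of speed over the 0.1 s sampling
# interval. A toy numpy equivalent (hypothetical speed samples, not taken from any segment):
def _example_trapezoidal_distance(dt=0.1):
    speed = np.array([10.0, 10.0, 11.0, 12.0])             # m/s
    increments = 0.5 * (speed[1:] + speed[:-1]) * dt
    return np.concatenate(([0.0], np.cumsum(increments)))  # [0.0, 1.0, 2.05, 3.2]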
def update_speed_and_accer(series_in, filter_label):
# this function calculate the speed, accelearation, jerk based on position
# series_in is the same format as coord_series_in
# output is series_in with updated speed and accer
if filter_label == 1:
current_cumu_dis = 'filter_cumu_dis'
current_speed = 'filter_speed'
        current_accer = 'filter_accer'
        current_jerk = 'filter_jerk'  # matches the 'filter_jerk' column initialized in cumulated_dis_cal
elif filter_label == 0:
current_cumu_dis = 'cumu_dis'
current_speed = 'speed'
current_accer = 'accer'
current_jerk = 'jerk'
else:
# label should be 2
current_cumu_dis = 'remove_outlier_cumu_dis'
current_speed = 'remove_outlier_speed'
current_accer = 'remove_outlier_accer'
current_jerk = 'remove_outlier_jerk'
# calculate speed
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_speed] = float(
series_in.at[i + 2, current_cumu_dis] - series_in.at[i, current_cumu_dis]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_speed] = float(
series_in.at[i, current_cumu_dis] - series_in.at[i - 2, current_cumu_dis]) / (float(0.2))
else:
series_in.at[i, current_speed] = float(
series_in.at[i + 1, current_cumu_dis] - series_in.at[i - 1, current_cumu_dis]) / (float(0.2))
    # calculate acceleration
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
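# Speed and acceleration above use a central difference over two 0.1 s steps, with a
# forward/backward difference at the two ends. A toy numpy sketch of the same stencil
# (hypothetical positions, not taken from any segment):
def _example_central_difference(dt=0.1):
    pos = np.array([0.0, 1.0, 2.2, 3.6, 5.2])
    speed = np.empty_like(pos)
    speed[0] = (pos[2] - pos[0]) / (2 * dt)        # forward difference at the start
    speed[-1] = (pos[-1] - pos[-3]) / (2 * dt)     # backward difference at the end
    speed[1:-1] = (pos[2:] - pos[:-2]) / (2 * dt)  # central difference elsewhere
    return speed                                   # [11.0, 11.0, 13.0, 15.0, 15.0]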
def plot_outlier_adjacent_trj(series_in, outlier_pos_in, first_pos_in, last_pos_in, segment_id_in, veh_id_in, start_time_in, comparison_label):
# plot the adjacent trajectory of the outlier (20 points)
outlier_time = round(start_time_in + outlier_pos_in * 0.1, 1)
included_index = np.arange(first_pos_in, last_pos_in + 1, dtype=int)
outlier_trj = series_in.loc[included_index, :]
outlier_trj.loc[:, 'local_time'] = np.array(included_index) * 0.1 + start_time_in
plt.subplot(3, 1, 1)
plt.plot(outlier_trj['local_time'], outlier_trj['cumu_dis'], '-*k', linewidth=0.25, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_cumu_dis'], '-m', linewidth=0.25, label='Outliers Removed')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(segment_id_in)) + ' Vehicle' + str(
int(veh_id_in)) + ' Outlier at Time ' + str(outlier_time) + ' Removing'
else:
trj_title = 'Segment ' + str(int(segment_id_in)) + ' Vehicle' + str(
int(veh_id_in)) + ' Outlier at Time ' + str(outlier_time) + ' Pattern'
plt.ylabel('Position (m)')
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(outlier_trj['local_time'], outlier_trj['speed'], '-*k', linewidth=0.5, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_speed'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.ylabel('Speed (m/s)')
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(outlier_trj['local_time'], outlier_trj['accer'], '-*k', linewidth=0.5, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_accer'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/outlier_pattern_and_removing/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def outlier_removing_optimization_model(initial_state_in, last_state_in, num_points_in):
# note that the num_points_in includes the first and last points
# if the total number of interpolated points is n, then num_points_in = n + 2
# time interval is 0.1 second
# total number of time steps
max_acc = 5
min_acc = -8
total_steps = num_points_in
first_pos_in = initial_state_in[0]
first_speed_in = initial_state_in[1]
first_acc_in = initial_state_in[2]
last_pos_in = last_state_in[0]
last_speed_in = last_state_in[1]
last_acc_in = last_state_in[2]
# time interval in each step
time_interval = 0.1
# model = GEKKO() # Initialize gekko
model = GEKKO(remote=False) # Initialize gekko
# Use IPOPT solver (default)
model.options.SOLVER = 3
model.options.SCALING = 2
# Initialize variables
acc = [None] * total_steps # simulated acceleration
velocity = [None] * total_steps # simulated velocity
pos = [None] * total_steps # simulated position
for i in range(total_steps):
pos[i] = model.Var()
velocity[i] = model.Var()
velocity[i].lower = 0
acc[i] = model.Var(lb=min_acc, ub=max_acc)
min_sim_acc = model.Var()
max_sim_acc = model.Var()
model.Equation(pos[0] == first_pos_in)
model.Equation(velocity[0] == first_speed_in)
model.Equation(acc[0] == first_acc_in)
model.Equation(pos[total_steps - 1] == last_pos_in)
model.Equation(velocity[total_steps - 1] == last_speed_in)
model.Equation(acc[total_steps - 1] == last_acc_in)
for i in range(total_steps):
if 1 <= i <= total_steps - 1:
model.Equation(velocity[i] == velocity[i - 1] + acc[i - 1] * time_interval)
model.Equation(pos[i] == pos[i - 1] + 0.5 * (velocity[i] + velocity[i - 1]) * time_interval)
for i in range(total_steps):
model.Equation(min_sim_acc <= acc[i])
model.Equation(max_sim_acc >= acc[i])
# objective function: minimize the difference between max_sim_acc and min_sim_acc
model.Obj(max_sim_acc - min_sim_acc)
# model.options.IMODE = 2 # Steady state optimization
model.options.MAX_MEMORY = 5
model.solve(disp=False)
# solve_time = model.options.SOLVETIME
# extract values from Gekko type variables
acc_value = np.zeros(total_steps)
velocity_value = np.zeros(total_steps)
pos_value = np.zeros(total_steps)
for i in range(total_steps):
acc_value[i] = acc[i].value[0]
velocity_value[i] = velocity[i].value[0]
pos_value[i] = pos[i].value[0]
return pos_value, velocity_value, acc_value
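# Hedged usage sketch for the optimization model above: the boundary states are
# [position (m), speed (m/s), acceleration (m/s2)] triples and the model returns smoothed
# position/speed/acceleration profiles between them. All numbers below are made up for illustration
# and are not taken from the dataset.
def outlier_removing_optimization_sketch():
    example_initial_state = [0.0, 10.0, 0.0]
    example_last_state = [20.5, 10.5, 0.0]
    example_num_points = 21  # includes both boundary points, i.e. 2 s of data at 0.1 s resolution
    return outlier_removing_optimization_model(example_initial_state, example_last_state, example_num_points)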
def optimization_based_outlier_removing(series_in, first_pos_in, last_pos_in, min_acc_in, max_acc_in):
# given the position of the outlier, optimize its vicinity's trajectory
first_point_pos = first_pos_in
last_point_pos = last_pos_in
first_point_cumu_dis = series_in.at[first_point_pos, 'remove_outlier_cumu_dis']
first_point_speed = series_in.at[first_point_pos, 'remove_outlier_speed']
if series_in.at[first_point_pos, 'remove_outlier_accer'] <= min_acc_in:
first_point_acc = min_acc_in
elif series_in.at[first_point_pos, 'remove_outlier_accer'] >= max_acc_in:
first_point_acc = max_acc_in
else:
first_point_acc = series_in.at[first_point_pos, 'remove_outlier_accer']
first_point_state = [first_point_cumu_dis, first_point_speed, first_point_acc]
last_point_cumu_dis = series_in.at[last_point_pos, 'remove_outlier_cumu_dis']
last_point_speed = series_in.at[last_point_pos, 'remove_outlier_speed']
if series_in.at[last_point_pos, 'remove_outlier_accer'] <= min_acc_in:
last_point_acc = min_acc_in
elif series_in.at[last_point_pos, 'remove_outlier_accer'] >= max_acc_in:
last_point_acc = max_acc_in
else:
last_point_acc = series_in.at[last_point_pos, 'remove_outlier_accer']
last_point_state = [last_point_cumu_dis, last_point_speed, last_point_acc]
actual_total_related_points = last_point_pos - first_point_pos + 1
pos_result, speed_result, acc_result = outlier_removing_optimization_model(first_point_state, last_point_state, actual_total_related_points)
series_in.loc[first_point_pos:last_point_pos, 'remove_outlier_cumu_dis'] = pos_result
series_in = update_speed_and_accer(series_in, 2)
return series_in
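# The if/elif/else blocks above simply clamp the boundary accelerations into [min_acc_in, max_acc_in]
# before they are handed to the optimizer. A compact helper expressing the same idea (sketch only,
# not used by the pipeline):
def clamp_boundary_acceleration(acc_value, min_acc_in, max_acc_in):
    return float(min(max(acc_value, min_acc_in), max_acc_in))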
def wavefilter(data):
# We will use the Daubechies(6) wavelet
daubechies_num = 6
wname = "db" + str(daubechies_num)
datalength = data.shape[0]
max_level = pywt.dwt_max_level(datalength, wname)
print('maximum level is: %s' % max_level)
# Initialize the container for the filtered data
# Decompose the signal
# coeffs[0] is the approximation coefficients, coeffs[1] is the nth-level detail coefficients, coeffs[-1] is the first-level detail coefficients
coeffs = pywt.wavedec(data, wname, mode='smooth', level=max_level)
# discard all detail coefficients (keep only the approximation, i.e. a low-pass reconstruction)
for j in range(max_level):
coeffs[-j - 1] = np.zeros_like(coeffs[-j - 1])
# Reconstruct the signal and save it
filter_data = pywt.waverec(coeffs, wname, mode='smooth')
fdata = filter_data[0:datalength]
return fdata
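# Hedged usage sketch for wavefilter(): it expects a 1-D numpy array and returns a low-pass
# reconstruction of the same length. The synthetic noisy speed signal below is purely illustrative.
def wavefilter_usage_sketch():
    import numpy as np
    noisy_speed = 10.0 + 0.5 * np.sin(np.linspace(0, 6 * np.pi, 200)) + np.random.normal(0.0, 0.3, 200)
    return wavefilter(noisy_speed)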
def wavelet_filter(series_in):
remove_outlier_speed_signal = series_in.loc[:, 'remove_outlier_speed'].to_numpy()
wavelet_filter_speed = wavefilter(remove_outlier_speed_signal)
series_in.loc[:, 'wavelet_filter_speed'] = wavelet_filter_speed
series_in.loc[:, 'wavelet_filter_cumu_dis'] = None
series_in.loc[:, 'wavelet_filter_accer'] = None
series_in.loc[:, 'wavelet_filter_jerk'] = None
# update cumulative distance
for i in range(len(series_in['global_center_x'])):
if i == 0:
# start from the filtered value
series_in.loc[i, 'wavelet_filter_cumu_dis'] = 0 # initial pos should be 0
else:
series_in.loc[i, 'wavelet_filter_cumu_dis'] = series_in.loc[i - 1, 'wavelet_filter_cumu_dis'] + (
series_in.loc[i - 1, 'wavelet_filter_speed'] + series_in.loc[i, 'wavelet_filter_speed']) * 0.5 * 0.1
# update acceleration
current_speed = 'wavelet_filter_speed'
current_accer = 'wavelet_filter_accer'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
current_jerk = 'wavelet_filter_jerk'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
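# The cumulative-distance loop above is a trapezoidal integration of the filtered speed at 0.1 s
# steps. A vectorized sketch of the same integration (illustrative helper, not called by the pipeline):
def integrate_speed_to_position_sketch(speed_values, dt=0.1):
    import numpy as np
    speed_values = np.asarray(speed_values, dtype=float)
    increments = 0.5 * (speed_values[1:] + speed_values[:-1]) * dt
    return np.concatenate(([0.0], np.cumsum(increments)))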
def trajectory_correctness(coord_series_in, segment_id_in, veh_id_in, start_time_in):
# this function remove outliers and filter the trajectory
# input coord_series_in: ['global_center_x', 'global_center_y', 'cumu_dis', 'speed', 'accer']
# output coord_series_in: ['global_center_x', 'global_center_y', 'cumu_dis', 'speed', 'accer', 'filter_cumu_dis', 'filter_speed', 'filter_accer']
minimum_accer = -8
maximum_accer = 5
reference_points_num = 20
coord_series_in.reset_index(inplace=True, drop=True)
global all_outlier_record
# remove outliers in acceleration; the correction is applied to the cumulative distance profile (via optimization_based_outlier_removing below)
# initialize remove outlier results
coord_series_in.loc[:, 'remove_outlier_cumu_dis'] = coord_series_in.loc[:, 'cumu_dis']
coord_series_in.loc[:, 'remove_outlier_speed'] = coord_series_in.loc[:, 'speed']
coord_series_in.loc[:, 'remove_outlier_accer'] = coord_series_in.loc[:, 'accer']
# outlier removal is intended to be repeated until no outlier remains (controlled by the outlier_label flag below)
outlier_label = 1
while outlier_label:
outlier_label = 0
for m in range(len(coord_series_in['global_center_x'])):
if coord_series_in.at[m, 'remove_outlier_accer'] >= maximum_accer or coord_series_in.at[m, 'remove_outlier_accer'] <= minimum_accer:
print('Outlier info: Current segment: %s, vehicle id: %s, time: %s, position: %s' % (
segment_id_in, veh_id_in, round(m * 0.1 + start_time_in, 1), m))
single_outlier_record = pd.DataFrame(np.zeros((1, 3)), columns=['segment_id', 'local_veh_id', 'outlier_time'])
single_outlier_record.loc[0, 'segment_id'] = segment_id_in
single_outlier_record.loc[0, 'local_veh_id'] = veh_id_in
single_outlier_record.loc[0, 'outlier_time'] = start_time_in + 0.1 * m
all_outlier_record = all_outlier_record.append(single_outlier_record)
total_related_points = reference_points_num  # window of points taken around the outlier
first_point_pos = int(max(0, m - total_related_points / 2))
last_point_pos = int(min(len(coord_series_in.loc[:, 'remove_outlier_accer']) - 1, m + total_related_points / 2))
if first_point_pos == 0:
last_point_pos = first_point_pos + total_related_points
if last_point_pos == len(coord_series_in.loc[:, 'remove_outlier_accer']) - 1:
first_point_pos = last_point_pos - total_related_points
plot_outlier_adjacent_trj(coord_series_in, m, first_point_pos, last_point_pos, segment_id_in, veh_id_in, start_time_in, 0)
# the following pairs may not have feasible solutions during outlier removal
if segment_id_in == 191 and veh_id_in == 6:
pass
elif segment_id_in == 270 and veh_id_in == 4:
pass
elif segment_id_in == 276 and veh_id_in == 2:
pass
elif segment_id_in == 320 and veh_id_in == 1:
pass
elif segment_id_in == 406 and veh_id_in == 25:
pass
elif segment_id_in == 449 and veh_id_in == 41:
pass
elif segment_id_in == 450 and veh_id_in == 15:
pass
elif segment_id_in == 676 and veh_id_in == 15:
pass
elif segment_id_in == 769 and veh_id_in == 50:
pass
elif segment_id_in == 916 and veh_id_in == 4:
pass
elif segment_id_in == 968 and veh_id_in == 18:
pass
else:
coord_series_in = optimization_based_outlier_removing(coord_series_in, first_point_pos, last_point_pos, minimum_accer,
maximum_accer)
plot_outlier_adjacent_trj(coord_series_in, m, first_point_pos, last_point_pos, segment_id_in, veh_id_in, start_time_in, 1)
outlier_label = 0  # note: left at 0, so the while loop makes a single pass; set to 1 to repeat until no outliers remain
# implement wavelet filter after removing outliers
coord_series_in = wavelet_filter(coord_series_in)
# set the final filter results to the wavelet filter results
coord_series_in.loc[:, 'filter_cumu_dis'] = coord_series_in.loc[:, 'wavelet_filter_cumu_dis'].to_numpy()
coord_series_in.loc[:, 'filter_speed'] = coord_series_in.loc[:, 'wavelet_filter_speed'].to_numpy()
coord_series_in.loc[:, 'filter_accer'] = coord_series_in.loc[:, 'wavelet_filter_accer'].to_numpy()
coord_series_in.loc[:, 'filter_jerk'] = coord_series_in.loc[:, 'wavelet_filter_jerk'].to_numpy()
return coord_series_in
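# Hedged usage sketch for trajectory_correctness(): the input frame must already carry
# ['global_center_x', 'global_center_y', 'cumu_dis', 'speed', 'accer'], and the returned frame gains
# 'remove_outlier_*', 'wavelet_filter_*' and 'filter_*' columns. The identifier values below are
# placeholders, not values taken from the dataset.
def trajectory_correctness_usage_sketch(raw_coord_df):
    example_segment_id, example_veh_id, example_start_time = 1, 1, 0.0
    return trajectory_correctness(raw_coord_df, example_segment_id, example_veh_id, example_start_time)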
def before_and_after_remove_outlier_plot(trj_in):
current_seg_id = trj_in['segment_id'].iloc[0]
follower_id_in = trj_in['local_veh_id'].iloc[0]
if len(all_outlier_record) > 0:
current_seg_outlier_record = all_outlier_record.loc[
all_outlier_record['segment_id'] == current_seg_id, :]
current_seg_outlier_record_local_veh_id = current_seg_outlier_record.loc[:, 'local_veh_id'].to_numpy().astype(np.int32)
else:
current_seg_outlier_record_local_veh_id = []
if int(follower_id_in) in current_seg_outlier_record_local_veh_id:
plt.subplot(3, 1, 1)
plt.plot(trj_in['local_time'], trj_in['position'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_pos'], '-m', linewidth=0.25, label='Outliers Removed')
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(current_seg_id)) + ' Vehicle' + str(
int(follower_id_in)) + ' Before and After Removing Outliers'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(trj_in['local_time'], trj_in['speed'], '--k', linewidth=0.5, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_speed'], '-m', linewidth=0.5, label='Outliers Removed')
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(trj_in['local_time'], trj_in['accer'], '--k', linewidth=0.5, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_accer'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/before_and_after_remove_outlier_plot/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def before_and_after_filtering_plot(trj_in):
current_seg_id = trj_in['segment_id'].iloc[0]
follower_id_in = trj_in['local_veh_id'].iloc[0]
if len(all_outlier_record) > 0:
current_seg_outlier_record = all_outlier_record.loc[
all_outlier_record['segment_id'] == current_seg_id, :]
current_seg_outlier_record_local_veh_id = current_seg_outlier_record.loc[:, 'local_veh_id'].to_numpy().astype(np.int32)
else:
current_seg_outlier_record_local_veh_id = []
if int(follower_id_in) in current_seg_outlier_record_local_veh_id:
plt.subplot(3, 1, 1)
plt.plot(trj_in['local_time'], trj_in['position'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_pos'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_pos'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(current_seg_id)) + ' Vehicle' + str(
int(follower_id_in)) + ' Before and After Filtering'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(trj_in['local_time'], trj_in['speed'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_speed'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_speed'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(trj_in['local_time'], trj_in['accer'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_accer'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_accer'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/before_and_after_filtering_plot/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def pair_cf_coord_cal(leader_id, leader_trj_in, follower_id, follower_trj_in, av_label):
# convert 2-d coordinates to 1-d longitudinal coordinates
# note that the leader and follower interacts with each other
# av_label is to determine whether av is leader or follower (0 for follower, 1 for leader, 2 for non-av pair)
global all_seg_paired_cf_trj_final
global all_seg_paired_cf_trj_with_comparison
# extract mutual cf trajectory
min_local_time = max(leader_trj_in['local_time_stamp'].min(), follower_trj_in['local_time_stamp'].min())
max_local_time = min(leader_trj_in['local_time_stamp'].max(), follower_trj_in['local_time_stamp'].max())
leader_trj_in = leader_trj_in.loc[leader_trj_in['local_time_stamp'] >= min_local_time, :]
leader_trj_in = leader_trj_in.loc[leader_trj_in['local_time_stamp'] <= max_local_time, :]
follower_trj_in = follower_trj_in.loc[follower_trj_in['local_time_stamp'] >= min_local_time, :]
follower_trj_in = follower_trj_in.loc[follower_trj_in['local_time_stamp'] <= max_local_time, :]
# sort the trj
leader_trj_in = leader_trj_in.sort_values(['local_time_stamp'])
follower_trj_in = follower_trj_in.sort_values(['local_time_stamp'])
# initialize output format
out_leader_trj = pd.DataFrame(leader_trj_in[['segment_id', 'veh_id', 'length', 'local_time_stamp']].to_numpy(),
columns=['segment_id', 'local_veh_id', 'length', 'local_time'])
out_leader_trj.loc[:, 'follower_id'] = follower_id
out_leader_trj.loc[:, 'leader_id'] = leader_id
out_follower_trj = pd.DataFrame(follower_trj_in[['segment_id', 'veh_id', 'length', 'local_time_stamp']].to_numpy(),
columns=['segment_id', 'local_veh_id', 'length', 'local_time'])
out_follower_trj.loc[:, 'follower_id'] = follower_id
out_follower_trj.loc[:, 'leader_id'] = leader_id
# calculate coordinates of leader and follower
temp_current_segment_id = out_follower_trj['segment_id'].iloc[0]
temp_start_time = out_follower_trj['local_time'].iloc[0]
leader_cumu_dis = cumulated_dis_cal(
leader_trj_in.loc[:, ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']], temp_current_segment_id, leader_id, temp_start_time)
follower_cumu_dis = cumulated_dis_cal(
follower_trj_in.loc[:, ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']], temp_current_segment_id, follower_id, temp_start_time)
# calculate initial distance
pre_x_1 = leader_trj_in['global_center_x'].iloc[0]
pre_y_1 = leader_trj_in['global_center_y'].iloc[0]
post_x_1 = follower_trj_in['global_center_x'].iloc[0]
post_y_1 = follower_trj_in['global_center_y'].iloc[0]
initial_dis = math.sqrt((post_x_1 - pre_x_1) ** 2 + (post_y_1 - pre_y_1) ** 2)
# create position, speed, and acceleration data
# follower's position always start from 0
# position based
out_follower_trj.loc[:, 'position'] = follower_cumu_dis['cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_pos'] = follower_cumu_dis['remove_outlier_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'filter_pos'] = follower_cumu_dis['filter_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_pos'] = follower_cumu_dis['wavelet_filter_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'speed'] = follower_cumu_dis['speed'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_speed'] = follower_cumu_dis['remove_outlier_speed'].to_numpy()
out_follower_trj.loc[:, 'filter_speed'] = follower_cumu_dis['filter_speed'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_speed'] = follower_cumu_dis['wavelet_filter_speed'].to_numpy()
out_follower_trj.loc[:, 'accer'] = follower_cumu_dis['accer'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_accer'] = follower_cumu_dis['remove_outlier_accer'].to_numpy()
out_follower_trj.loc[:, 'filter_accer'] = follower_cumu_dis['filter_accer'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_accer'] = follower_cumu_dis['wavelet_filter_accer'].to_numpy()
out_follower_trj.loc[:, 'jerk'] = follower_cumu_dis['jerk'].to_numpy()
out_follower_trj.loc[:, 'filter_jerk'] = follower_cumu_dis['filter_jerk'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_jerk'] = follower_cumu_dis['wavelet_filter_jerk'].to_numpy()
out_leader_trj.loc[:, 'position'] = leader_cumu_dis['cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'remove_outlier_pos'] = leader_cumu_dis['remove_outlier_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'filter_pos'] = leader_cumu_dis['filter_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'wavelet_filter_pos'] = leader_cumu_dis['wavelet_filter_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'speed'] = leader_cumu_dis['speed'].to_numpy()
out_leader_trj.loc[:, 'remove_outlier_speed'] = leader_cumu_dis['remove_outlier_speed'].to_numpy()
out_leader_trj.loc[:, 'filter_speed'] = leader_cumu_dis['filter_speed'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_speed'] = leader_cumu_dis['wavelet_filter_speed'].to_numpy()
out_leader_trj.loc[:, 'accer'] = leader_cumu_dis['accer'].to_numpy()
out_leader_trj.loc[:, 'remove_outlier_accer'] = leader_cumu_dis['remove_outlier_accer'].to_numpy()
out_leader_trj.loc[:, 'filter_accer'] = leader_cumu_dis['filter_accer'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_accer'] = leader_cumu_dis['wavelet_filter_accer'].to_numpy()
out_leader_trj.loc[:, 'jerk'] = leader_cumu_dis['jerk'].to_numpy()
out_leader_trj.loc[:, 'filter_jerk'] = leader_cumu_dis['filter_jerk'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_jerk'] = leader_cumu_dis['wavelet_filter_jerk'].to_numpy()
# speed based
out_follower_trj.loc[:, 'speed_based_position'] = follower_cumu_dis['speed_based_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'speed_based_speed'] = follower_cumu_dis['speed_based_speed'].to_numpy()
out_follower_trj.loc[:, 'speed_based_accer'] = follower_cumu_dis['speed_based_accer'].to_numpy()
out_follower_trj.loc[:, 'speed_based_jerk'] = follower_cumu_dis['speed_based_jerk'].to_numpy()
out_leader_trj.loc[:, 'speed_based_position'] = leader_cumu_dis['speed_based_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'speed_based_speed'] = leader_cumu_dis['speed_based_speed'].to_numpy()
out_leader_trj.loc[:, 'speed_based_accer'] = leader_cumu_dis['speed_based_accer'].to_numpy()
out_leader_trj.loc[:, 'speed_based_jerk'] = leader_cumu_dis['speed_based_jerk'].to_numpy()
# plot speed and acc figure
before_and_after_remove_outlier_plot(out_follower_trj)
before_and_after_remove_outlier_plot(out_leader_trj)
before_and_after_filtering_plot(out_follower_trj)
before_and_after_filtering_plot(out_leader_trj)
# save cf paired trj
# all_seg_paired_cf_trj = pd.concat([all_seg_paired_cf_trj, pd.concat([out_leader_trj, out_follower_trj])])
all_seg_paired_cf_trj_with_comparison = all_seg_paired_cf_trj_with_comparison.append(
pd.concat([out_leader_trj, out_follower_trj]))
out_follower_trj_final = out_follower_trj.loc[:,
['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']]
out_follower_trj_final.columns = ['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']
out_leader_trj_final = out_leader_trj.loc[:,
['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']]
out_leader_trj_final.columns = ['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']
all_seg_paired_cf_trj_final = all_seg_paired_cf_trj_final.append(
pd.concat([out_leader_trj_final, out_follower_trj_final]))
# plot the car following trj of both follower and leader
cf_paired_trj_plot(out_leader_trj_final, out_follower_trj_final, av_label)
def cf_pair_coord_trans(seg_trj_in, follower_id_in, leader_id_in, av_related_label):
# extract all cf pairs in one segment
# the input seg_trj_in is already with local id
# av_related_label determines if there is an av involved
# return the paired trj with transformed coordinates in the format ['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id', 'position', 'speed', 'accer']
follower_trj = seg_trj_in[seg_trj_in['veh_id'] == follower_id_in]
leader_trj = seg_trj_in[seg_trj_in['veh_id'] == leader_id_in]
ego_trj = seg_trj_in[seg_trj_in['veh_id'] == 0]
if av_related_label:
# process av related pair
if follower_id_in == 0 and leader_id_in == 0:
# this segment is not suitable for cf (av related)
pass
elif follower_id_in == 0 and leader_id_in != 0:
# AV-HV pair
pair_cf_coord_cal(leader_id_in, leader_trj, 0, ego_trj, 0)
elif follower_id_in != 0 and leader_id_in == 0:
# HV-AV pair
pair_cf_coord_cal(0, ego_trj, follower_id_in, follower_trj, 1)
else:
# both AV-HV pair and HV-AV pair
pair_cf_coord_cal(leader_id_in, leader_trj, 0, ego_trj, 0)
pair_cf_coord_cal(0, ego_trj, follower_id_in, follower_trj, 1)
else:
# process HV-HV pair
pair_cf_coord_cal(leader_id_in, leader_trj, follower_id_in, follower_trj, 2)
def cf_paired_trj_plot(leader_trj_in, follower_trj_in, av_label):
# av_label is to determine whether av is leader or follower (0 for follower, 1 for leader, 2 for non-av)
# the format of the trajectory is pandas dataframe
# for av_label: 0 means AV-HV, 1 means HV-AV, 2 means HV-HV
current_segment_id = int(leader_trj_in['segment_id'].iloc[0])
current_leader_id = int(leader_trj_in['local_veh_id'].iloc[0])
current_follower_id = int(follower_trj_in['local_veh_id'].iloc[0])
if av_label == 0:
follower_line = '-r'
leader_line = '--b'
follower_label = 'AV Follower'
leader_label = 'HV Leader'
trj_title = 'AV' + '-HV' + str(current_leader_id)
trj_save_title = 'figure_save/trajectory_process/position_time_plot/av_hv/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
elif av_label == 1:
follower_line = '-b'
leader_line = '--r'
follower_label = 'HV Follower'
leader_label = 'AV Leader'
trj_title = 'HV' + str(current_follower_id) + '-AV'
trj_save_title = 'figure_save/trajectory_process/position_time_plot/hv_av/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
else:
follower_line = '-b'
leader_line = '--b'
follower_label = 'HV Follower'
leader_label = 'HV Leader'
trj_title = 'HV' + str(current_follower_id) + '-HV' + str(current_leader_id)
trj_save_title = 'figure_save/trajectory_process/position_time_plot/hv_hv/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
plt.subplot(3, 1, 1)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_pos'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_pos'], leader_line, linewidth=0.5, label=leader_label)
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(current_segment_id) + ' ' + trj_title + ' Trajectory'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_speed'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_speed'], leader_line, linewidth=0.5, label=leader_label)
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_accer'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_accer'], leader_line, linewidth=0.5, label=leader_label)
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-5, 5])
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def cf_trj_info_cal(all_seg_paired_cf_trj_in):
# calculate car following measurements
# output format
# ['segment_id', 'local_veh_id', 'length','local_time','follower_id', 'leader_id', 'position', 'speed', 'accer',
# 'cf_label', 'space_hwy', 'net_distance', 'time_hwy', 'speed_diff', 'TTC', 'DRAC']
# cf_label: 0 for AV-HV, 1 for HV-AV, 2 for HV-HV
# net_distance = space_hwy - 0.5*follower_length - 0.5*leader_length
# time_hwy = space_hwy/follower_speed
# speed_diff = follower_speed - leader_speed
# Time To Collision: TTC = speed_diff/net_distance
# Deceleration Required to Avoid Crash: DRAC = (speed_diff ** 2) / net_distance
def single_cf_pair_info_cal(follower_trj_in, leader_trj_in, cf_label_in):
global all_seg_cf_info
# input format ['segment_id', 'local_veh_id', 'length','local_time','follower_id', 'leader_id', 'position', 'speed', 'accer']
out_cf_info = follower_trj_in.copy(deep=True)
out_cf_info['cf_label'] = cf_label_in
out_cf_info['space_hwy'] = 0
out_cf_info['net_distance'] = 0
out_cf_info['time_hwy'] = 0
out_cf_info['speed_diff'] = 0
out_cf_info['TTC'] = 0
out_cf_info['DRAC'] = 0
for i in range(len(out_cf_info['segment_id'])):
current_time = out_cf_info['local_time'].iloc[i]
l_time_match = abs(leader_trj_in['local_time'] - current_time) <= 0.001
matched_leader_trj = leader_trj_in.loc[l_time_match, :]
if len(matched_leader_trj) > 0:
space_hwy = matched_leader_trj['filter_pos'].iloc[0] - out_cf_info['filter_pos'].iloc[i]
out_cf_info['space_hwy'].iloc[i] = space_hwy
net_distance = space_hwy - 0.5 * matched_leader_trj['length'].iloc[0] - 0.5 * \
out_cf_info['length'].iloc[i]
out_cf_info['net_distance'].iloc[i] = net_distance
if out_cf_info['filter_speed'].iloc[i] <= 0.1:
out_cf_info['time_hwy'].iloc[i] = 1000000
else:
out_cf_info['time_hwy'].iloc[i] = space_hwy / out_cf_info['filter_speed'].iloc[i]
speed_diff = out_cf_info['filter_speed'].iloc[i] - matched_leader_trj['filter_speed'].iloc[0]
out_cf_info['speed_diff'].iloc[i] = speed_diff
if speed_diff < 0:
out_cf_info['TTC'].iloc[i] = 0
out_cf_info['DRAC'].iloc[i] = 0
else:
out_cf_info['TTC'].iloc[i] = net_distance / speed_diff
out_cf_info['DRAC'].iloc[i] = (speed_diff ** 2) / net_distance
all_seg_cf_info = all_seg_cf_info.append(out_cf_info)
# ----- av-related cf info -----
all_seg_av_hv_trj = all_seg_paired_cf_trj_in.loc[all_seg_paired_cf_trj_in['follower_id'] == 0, :]
all_seg_hv_av_trj = all_seg_paired_cf_trj_in.loc[all_seg_paired_cf_trj_in['leader_id'] == 0, :]
av_hv_seg_id = pd.unique(all_seg_av_hv_trj['segment_id'])
for id1 in av_hv_seg_id:
segment_print = 'Now in AV-HV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_av_hv_trj.loc[all_seg_av_hv_trj['segment_id'] == id1, :]
follower_id = current_seg_trj['follower_id'].iloc[0]
leader_id = current_seg_trj['leader_id'].iloc[0]
flollower_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == follower_id, :]
leader_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 0)
follower_av_seg_id = pd.unique(all_seg_hv_av_trj['segment_id'])
for id1 in follower_av_seg_id:
segment_print = 'Now in HV-AV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_hv_av_trj.loc[all_seg_hv_av_trj['segment_id'] == id1, :]
follower_id = current_seg_trj['follower_id'].iloc[0]
leader_id = current_seg_trj['leader_id'].iloc[0]
flollower_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == follower_id, :]
leader_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 1)
# ----- hv-hv cf info -----
l1 = all_seg_paired_cf_trj_in['follower_id'] != 0
l2 = all_seg_paired_cf_trj_in['leader_id'] != 0
all_seg_hv_hv_leader_trj = all_seg_paired_cf_trj_in.loc[l1 & l2, :]
hv_hv_seg_id = pd.unique(all_seg_hv_hv_leader_trj['segment_id'])
for id1 in hv_hv_seg_id:
segment_print = 'Now in HV-HV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_hv_hv_leader_trj.loc[all_seg_hv_hv_leader_trj['segment_id'] == id1, :]
all_follower_id = pd.unique(current_seg_trj['follower_id'])
for id2 in all_follower_id:
# note that one segment may have multiple hv-hv pairs
current_pair_trj = current_seg_trj.loc[current_seg_trj['follower_id'] == id2, :]
follower_id = current_pair_trj['follower_id'].iloc[0]
leader_id = current_pair_trj['leader_id'].iloc[0]
flollower_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == follower_id, :]
leader_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 2)
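# The surrogate safety measures computed above can be summarized for a single time step as follows.
# This stand-alone sketch restates the same formulas (net distance, time headway, speed difference,
# TTC, DRAC, with the zero-speed-difference case guarded); it is illustrative only and is not called
# by cf_trj_info_cal.
def surrogate_safety_measures_sketch(space_hwy, follower_length, leader_length, follower_speed, leader_speed):
    net_distance = space_hwy - 0.5 * follower_length - 0.5 * leader_length
    time_hwy = space_hwy / follower_speed if follower_speed > 0.1 else 1000000
    speed_diff = follower_speed - leader_speed
    if speed_diff <= 0:
        ttc, drac = 0, 0
    else:
        ttc = net_distance / speed_diff
        drac = (speed_diff ** 2) / net_distance
    return net_distance, time_hwy, speed_diff, ttc, drac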
def cf_pair_exclude_rules_implementation(all_seg_paired_cf_trj_in, all_seg_cf_info_in):
# this function verify if a selected CF pair is suitable for car following research
# this verification is necessary because currently the CF pairs are extracted manually by watching the top view videos and might be error-prone
# 6 rules are defined in the paper:
# rule 1: Exclude if there is no leader or follower
# rule 2: Exclude if the follower or leader is off the Lidar detection range (disappear from the video) for some time
# rule 3: Exclude if the leader or follower is a bus or heavy truck
# rule 4: Exclude if the follower changes its leader (either the follower or the leader changes its lane)
# rule 5: Exclude if follower remains standstill during the entire segment
# rule 6: Exclude if the car following state is interrupted by turning, parking, stop signs, traffic signals, pedestrians, or other obstacles
# note that: for rule 1 there is no need to verify because all selected pairs have a follower and a leader
# note that: for rule 4, since there is no lane mark in the provided dataset, so we are not able to analysis lane changing pairs
# therefore, only rules 2, 3, 5, 6 are implemented here
all_seg_paired_cf_trj_verified = all_seg_paired_cf_trj_in.copy(deep=True)
def single_cf_pair_verification(flollower_trj_in, leader_trj_in, follower_cf_info_in):
# this function implement rules 2, 3, 5, 6
# output is 0 or 1: 0 denotes this pair is valid, 1 denotes this pair will be removed
output_label = 0 # default value is 0
flollower_trj_in.reset_index(inplace=True)
leader_trj_in.reset_index(inplace=True)
follower_cf_info_in.reset_index(inplace=True)
# rule 2
for i in range(1, len(flollower_trj_in.loc[:, 'segment_id'])):
# if the time difference between two consecutive points is larger than 0.2s, then this pair is excluded
if flollower_trj_in.loc[i, 'local_time'] - flollower_trj_in.loc[i - 1, 'local_time'] >= 0.2:
output_label = 1
print('Rule 2 excluded')
return output_label
for j in range(1, len(leader_trj_in.loc[:, 'segment_id'])):
# if the time difference between two consecutive points is larger than 0.2s, then this pair is excluded
if leader_trj_in.loc[j, 'local_time'] - leader_trj_in.loc[j - 1, 'local_time'] >= 0.2:
output_label = 1
print('Rule 2 excluded')
return output_label
# rule 3
large_vehicle_length_threshold = 8
if flollower_trj_in.loc[0, 'length'] >= large_vehicle_length_threshold:
output_label = 1
print('Rule 3 excluded')
return output_label
if leader_trj_in.loc[0, 'length'] >= large_vehicle_length_threshold:
output_label = 1
print('Rule 3 excluded')
return output_label
# rule 5
if flollower_trj_in.loc[:, 'filter_speed'].max() <= 0.1:
# the case where the follower is always standstill
output_label = 1
print('Rule 5 excluded')
return output_label
# rule 6
# based on the slope of the speed-spacing (v-s) curve: if the slope is clearly negative, the pair is excluded
detection_length = 50 # calculate the slope over every 50 points
slope_threshold = -0.5 # if the slope of the v-s curve is smaller than this threshold, the pair is excluded
for i in range(len(follower_cf_info_in.loc[:, 'segment_id']) - detection_length):
# monotonicity test: the slope is only calculated where the v-s curve is monotonic
l_speed = follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].is_monotonic_increasing or \
follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].is_monotonic_decreasing
l_spacing = follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].is_monotonic_increasing or \
follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].is_monotonic_decreasing
if l_speed and l_spacing:
v_data = follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].values.reshape(-1, 1)
s_data = follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].values.reshape(-1, 1)
current_regression = LinearRegression()
current_regression.fit(s_data, v_data)
current_slope = current_regression.coef_[0]
if current_slope <= slope_threshold:
output_label = 1
print('Rule 6 excluded')
return output_label
return output_label
all_seg_id = | pd.unique(all_seg_paired_cf_trj_in['segment_id']) | pandas.unique |
""""
FOR RESEARCH USE ONLY
Drafted Jan 6, 2021
Updated July 20, 2021
Seattle, WA
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import re
import pandas as pd
import numpy as np
# ██╗ ██╗ ██████╗ ███████╗ ███████╗██╗ ██╗██╗██████╗ ███████╗███╗ ██╗ ██████╗███████╗
# ██║ ██║ ██╔═══██╗██╔════╝ ██╔════╝██║ ██║██║██╔══██╗██╔════╝████╗ ██║██╔════╝██╔════╝
# ██║ █╗ ██║ ██║ ██║█████╗ █████╗ ██║ ██║██║██║ ██║█████╗ ██╔██╗ ██║██║ █████╗
# ██║███╗██║ ██║ ██║██╔══╝ ██╔══╝ ╚██╗ ██╔╝██║██║ ██║██╔══╝ ██║╚██╗██║██║ ██╔══╝
# ╚███╔███╔╝██╗ ╚██████╔╝██║ ███████╗ ╚████╔╝ ██║██████╔╝███████╗██║ ╚████║╚██████╗███████╗
# ╚══╝╚══╝ ╚═╝ ╚═════╝ ╚═╝ ╚══════╝ ╚═══╝ ╚═╝╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝╚══════╝
def weight_of_evidence(
hla_hits_df,
locus = "HLA-A",
threshold = 0.1,
use_detects = True,
use_counts = False,
remove_columns = ['association_pvalue']):
"""
Parameters
----------
hla_hits_df : pd.DataFrame
input DataFrame (columns are samples, rows are TCR features, values are counts per sample)
locus : str
"HLA-A",
threshold : float
0.1
use_detects : bool
if True, use detections
use_counts : bool
if True, use counts versus detections
remove_columns : list
['association_pvalue']
Returns
-------
pd.DataFrame
"""
# Remove columns that aren't the feature, hla_allele, or sample columns
col_ind = [x for x in hla_hits_df.columns if x not in remove_columns]
hla_hits_df = hla_hits_df[col_ind]
# Subset rows to only alleles that start with the <locus> string
ind = hla_hits_df['hla_allele'].apply(lambda x : x.startswith(locus))
hla_hits_df = hla_hits_df[ind].reset_index(drop = True)
# Gather wide DataFrame to a Long Data Frame
if 'tcr' in hla_hits_df.columns:
hla_hits_df = pd.melt(hla_hits_df, id_vars =['tcr','hla_allele'])
if 'match' in hla_hits_df.columns:
hla_hits_df = | pd.melt(hla_hits_df, id_vars =['match','hla_allele']) | pandas.melt |
########################################################################################################
# data_sql.py - Data pull from json, clean it up and upload to SQL
# by <NAME>
#
# This Python script pulls the metadata (links) from the following three json endpoints:-
# 1. https://api.weather.gov/points/31.7276,-110.8754
# 2. https://api.weather.gov/points/32.395,-110.6911
# 3. https://api.weather.gov/points/32.4186,-110.7383
#
# The links pulled (json data) from the above three json responses are
# the grid data links that are used to pull all the weather related data for the three campgrounds:-
# 1. https://api.weather.gov/gridpoints/TWC/91,26
# 2. https://api.weather.gov/gridpoints/TWC/101,54
# 3. https://api.weather.gov/gridpoints/TWC/100,56
#
# From the above grid data, 4 DataFrames are created. The challenge was pulling the data from the
# above json links and converting the date-time columns to a datetime format that can be
# uploaded to SQL and used for the graphs. Temperatures also need to be converted to degrees Fahrenheit
# and wind speeds to miles per hour:-
# 1. Campground information DataFrame with information like lat, lon, elevation,
# meta url, grid url, forest url, campsite url, fire danger and map code.
# 2. One for each campground (bs_grid_df, rc_grid_df, sc_grid_df). These df
# have columns (temp in degreeF, temp time, wind speed, wind speed time, wind gust,
# wind gust time, prob precipitation, Prob precp time, qty precip, qty precip time).
#
# SQLalchemy was used to create 4 tables in postgres SQL and then the above 4 DataFrames were uploaded
# Postgres SQL. The table names in SQL are:
# 1. camp_wx
# 2. cg_bog_spring
# 3. cg_rose_canyon
# 4. cg_spencer_canyon
#
# This script was converted from data_sql.ipynb
##########################################################################################################
# %%
# ------------------------
# Dependencies and Setup
# ------------------------
import pandas as pd
import json
import requests
import numpy as np
import datetime
from datetime import timedelta
from splinter import Browser
from bs4 import BeautifulSoup
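# -----------------------------------------------------------------------------------------------
# The per-variable blocks below repeat the same pattern: turn a list of {'validTime', 'value'}
# dicts from the NWS grid JSON into a DataFrame, optionally rescale the value, split validTime
# into date and time, build a datetime column, and keep today + 3 days. A hedged, generic sketch
# of that pattern is given here for reference only (it is not used by the script itself, and the
# column names are illustrative):
def nws_values_to_dataframe(values_list, value_scale=1.0, days_ahead=4):
    df = pd.DataFrame(values_list)
    df['scaled_value'] = df['value'] * value_scale  # e.g. 0.621371 for km/h -> mph (degF needs * 9/5 + 32 instead)
    date_part = df['validTime'].str.split('T', n=1, expand=True)
    time_part = date_part[1].str.split('+', n=1, expand=True)
    df['date'] = date_part[0]
    df['date_time'] = pd.to_datetime(df['date'] + ' ' + time_part[0])
    cutoff = datetime.datetime.strptime(df['date'][0], "%Y-%m-%d") + timedelta(days=days_ahead)
    return df.loc[df['date'] < cutoff.strftime("%Y-%m-%d")]
# -----------------------------------------------------------------------------------------------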
# %%
# --------------------------------------------------------------------
# Bog Spring CAMPGROUND
# --------------------------------------------------------------------
# ---------------------------------------------
# Pull Grid Data URL From Metadata url for
# ---------------------------------------------
bs_url = "https://api.weather.gov/points/31.7276,-110.8754"
response_bs = requests.get(bs_url)
data_bs = response_bs.json()
data_bs
grid_data_bs = data_bs["properties"]["forecastGridData"]
grid_data_bs
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for BogSprings Campground
# ------------------------------------------------------------------------
bs_forcast_url = grid_data_bs
response_bs_forecast = requests.get(bs_forcast_url)
data_bs_forecast = response_bs_forecast.json()
data_bs_forecast
lat_bs = data_bs_forecast["geometry"]["coordinates"][0][0][1]
lat_bs
lng_bs = data_bs_forecast["geometry"]["coordinates"][0][0][0]
lng_bs
elevation_bs = data_bs_forecast["properties"]["elevation"]["value"]
elevation_bs
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
bs_df = pd.DataFrame({"id": 1,
"campground": "Bog Springs",
"lat": [lat_bs],
"lon": [lng_bs],
"elevation": [elevation_bs],
"nws_meta_url": [bs_url],
"nws_grid_url": [grid_data_bs],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25732&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746637.jpg",
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3393.5714340164473!2d-110.87758868361043!3d31.72759998130141!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6970db0a5e44d%3A0x1b48084e4d6db970!2sBog%20Springs%20Campground!5e0!3m2!1sen!2sus!4v1626560932236!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
bs_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp = []
for i in data_bs_forecast["properties"]["temperature"]["values"]:
temp.append(i)
temp_df = pd.DataFrame(temp)
temp_df
# Temperature conversion to Degree Fahrenheit
temp_df['degF'] = (temp_df['value'] * 9 / 5) + 32
temp_df
# validTime Column split to date and time for Temperature
date_temp = temp_df['validTime'].str.split('T', n=1, expand=True)
time_temp = date_temp[1].str.split('+', n=1, expand=True)
time_temp
temp_df['date_temp'] = date_temp[0]
temp_df['time_temp'] = time_temp[0]
# Combine date and time with a space in between the two
temp_df['date_time_temp'] = temp_df['date_temp'] + ' ' + temp_df['time_temp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
temp_df['date_time_temp'] = pd.to_datetime(temp_df['date_time_temp'])
# Pull all the data for today + 3 days
time_delta_temp = datetime.datetime.strptime(temp_df['date_temp'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_df['times_temp'] = time_delta_temp.strftime("%Y-%m-%d")
temp_df = temp_df.loc[temp_df['date_temp'] < temp_df['times_temp']]
temp_df
# temp_df.dtypes
# =================== Wind Speed Data ======================
wind_speed = []
for i in data_bs_forecast["properties"]["windSpeed"]["values"]:
wind_speed.append(i)
windSpeed_df = pd.DataFrame(wind_speed)
windSpeed_df
# Converting KM/hour to Miles/hour
windSpeed_df['miles/hour'] = windSpeed_df['value'] * 0.621371
windSpeed_df
# validTime Column split to date and time for Wind Speed
date_ws = windSpeed_df['validTime'].str.split('T', n=1, expand=True)
time_ws = date_ws[1].str.split('+', n=1, expand=True)
time_ws
windSpeed_df['date_ws'] = date_ws[0]
windSpeed_df['time_ws'] = time_ws[0]
# Combine date and time with a space in between the two
windSpeed_df['date_time_ws'] = windSpeed_df['date_ws'] + ' ' + windSpeed_df['time_ws']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
windSpeed_df['date_time_ws'] = pd.to_datetime(windSpeed_df['date_time_ws'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_df['date_ws'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_df['times_ws'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_df = windSpeed_df.loc[windSpeed_df['date_ws'] < windSpeed_df['times_ws']]
windSpeed_df
# windSpeed_df.dtypes
# =================== Wind Gust Data ======================
wind_gust = []
for i in data_bs_forecast["properties"]["windGust"]["values"]:
wind_gust.append(i)
wind_gust_df = pd.DataFrame(wind_gust)
wind_gust_df
# Converting KM/hour to Miles/hour
wind_gust_df['m/h'] = wind_gust_df['value'] * 0.621371
wind_gust_df
# # validTime Column split to date and time for Wind Gusts
date_wg = wind_gust_df['validTime'].str.split('T', n=1, expand=True)
time_wg = date_wg[1].str.split('+', n=1, expand=True)
time_wg
wind_gust_df['date_wg'] = date_wg[0]
wind_gust_df['time_wg'] = time_wg[0]
# Combine date and time with a space in between the two
wind_gust_df['date_time_wg'] = wind_gust_df['date_wg'] + ' ' + wind_gust_df['time_wg']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
wind_gust_df['date_time_wg'] = pd.to_datetime(wind_gust_df['date_time_wg'])
wind_gust_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_df['date_wg'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_df['times_wg'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_df = wind_gust_df.loc[wind_gust_df['date_wg'] < wind_gust_df['times_wg']]
wind_gust_df
# wind_gust_df.dtypes
# =================== Probability of Precipitation Data ======================
prob_precip = []
for i in data_bs_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip.append(i)
prob_precip_df = pd.DataFrame(prob_precip)
prob_precip_df
# # validTime Column split to date and time for Probability Precipitation
date_pp = prob_precip_df['validTime'].str.split('T', n=1, expand=True)
time_pp = date_pp[1].str.split('+', n=1, expand=True)
time_pp
prob_precip_df['date_pp'] = date_pp[0]
prob_precip_df['time_pp'] = time_pp[0]
# Combine date and time with a space in between the two
prob_precip_df['date_time_pp'] = prob_precip_df['date_pp'] + ' ' + prob_precip_df['time_pp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
prob_precip_df['date_time_pp'] = pd.to_datetime(prob_precip_df['date_time_pp'])
prob_precip_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_df['date_pp'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_df['times_pp'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_df = prob_precip_df.loc[prob_precip_df['date_pp'] < prob_precip_df['times_pp']]
prob_precip_df
# prob_precip_df.dtypes
# =================== Quantity of Precipitation Data ======================
qty_precip = []
for i in data_bs_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip.append(i)
qty_precip_df = pd.DataFrame(qty_precip)
qty_precip_df
# # validTime Column split to date and time for quantity Precipitation
date_qp = qty_precip_df['validTime'].str.split('T', n=1, expand=True)
time_qp = date_qp[1].str.split('+', n=1, expand=True)
time_qp
qty_precip_df['date_qp'] = date_qp[0]
qty_precip_df['time_qp'] = time_qp[0]
# Combine date and time with a space in between the two
qty_precip_df['date_time_qp'] = qty_precip_df['date_qp'] + ' ' + qty_precip_df['time_qp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
qty_precip_df['date_time_qp'] = pd.to_datetime(qty_precip_df['date_time_qp'])
qty_precip_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_df['date_qp'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_df['times_qp'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_df = qty_precip_df.loc[qty_precip_df['date_qp'] < qty_precip_df['times_qp']]
qty_precip_df
# qty_precip_df.dtypes
# =================== Create DataFrame with all the above data for Bog Spring Campground ======================
bs_grid_df = pd.DataFrame({"id":1,
"campground": "Bog Springs",
"forecasted_temperature_degF": temp_df['degF'],
"forecastTime_temperature": temp_df['date_time_temp'],
"forecasted_windSpeed_miles_per_h": windSpeed_df['miles/hour'],
"forecastTime_windSpeed": windSpeed_df['date_time_ws'],
"forecasted_windGust_miles_per_h": wind_gust_df['m/h'],
"forecastTime_windGust": wind_gust_df['date_time_wg'],
"forecasted_probabilityOfPrecipitation": prob_precip_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_df['date_time_pp'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_df['date_time_qp'],
})
bs_grid_df
# bs_grid_df.dtypes
# %%
# --------------------------------------------------------------------
# ROSE CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
rc_url = "https://api.weather.gov/points/32.395,-110.6911"
response_rc = requests.get(rc_url)
data_rc = response_rc.json()
data_rc
grid_data_rc = data_rc["properties"]["forecastGridData"]
grid_data_rc
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Rose Canyon Campground
# ------------------------------------------------------------------------
rc_forcast_url = grid_data_rc
response_rc_forecast = requests.get(rc_forcast_url)
data_rc_forecast = response_rc_forecast.json()
data_rc_forecast
lat_rc = data_rc_forecast["geometry"]["coordinates"][0][0][1]
lat_rc
lng_rc = data_rc_forecast["geometry"]["coordinates"][0][0][0]
lng_rc
elevation_rc = data_rc_forecast["properties"]["elevation"]["value"]
elevation_rc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
rc_df = pd.DataFrame({"id": 2,
"campground": "Rose Canyon",
"lat": [lat_rc],
"lon": [lng_rc],
"elevation": [elevation_rc],
"nws_meta_url": [rc_url],
"nws_grid_url": [grid_data_rc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25698&actid=29",
"campsite_url": "https://cdn.recreation.gov/public/2019/06/20/00/19/232284_beeddff5-c966-49e2-93a8-c63c1cf21294_700.jpg",
# "nws_meta_json":[data_rc],
# "nws_grid_json": [data_rc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.97130566869!2d-110.70672358360277!3d32.39313088108983!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6400421614087%3A0xb6cfb84a4b05c95b!2sRose%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560965073!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
rc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull temperate, Wind Speed, Wind Gust, Probability of Precipitation, Quantity or Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_rc = []
for i in data_rc_forecast["properties"]["temperature"]["values"]:
temp_rc.append(i)
temp_rc_df = pd.DataFrame(temp_rc)
temp_rc_df
# Temperature conversion to Degree Fahrenheit
temp_rc_df['degF_rc'] = (temp_rc_df['value'] * 9 / 5) + 32
temp_rc_df
# validTime Column split to date and time for Temperature
date_temp_rc = temp_rc_df['validTime'].str.split('T', n=1, expand=True)
time_temp_rc = date_temp_rc[1].str.split('+', n=1, expand=True)
time_temp_rc
temp_rc_df['date_temp_rc'] = date_temp_rc[0]
temp_rc_df['time_temp_rc'] = time_temp_rc[0]
# Combine date and time with a space in between the two
temp_rc_df['date_time_temp_rc'] = temp_rc_df['date_temp_rc'] + ' ' + temp_rc_df['time_temp_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
temp_rc_df['date_time_temp_rc'] = pd.to_datetime(temp_rc_df['date_time_temp_rc'])
# Pull all the data for today + 3 days
time_delta_temp_rc = datetime.datetime.strptime(temp_rc_df['date_temp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_rc_df['times_temp_rc'] = time_delta_temp_rc.strftime("%Y-%m-%d")
temp_rc_df = temp_rc_df.loc[temp_rc_df['date_temp_rc'] < temp_rc_df['times_temp_rc']]
temp_rc_df
temp_rc_df.dtypes
# =================== Wind Speed Data ======================
wind_speed_rc = []
for i in data_rc_forecast["properties"]["windSpeed"]["values"]:
wind_speed_rc.append(i)
windSpeed_rc_df = pd.DataFrame(wind_speed_rc)
windSpeed_rc_df
# Converting KM/hour to Miles/hour
windSpeed_rc_df['miles/hour_rc'] = windSpeed_rc_df['value'] * 0.621371
windSpeed_rc_df
# validTime Column split to date and time for wind Speed
date_ws_rc = windSpeed_rc_df['validTime'].str.split('T', n=1, expand=True)
time_ws_rc = date_ws_rc[1].str.split('+', n=1, expand=True)
time_ws_rc
windSpeed_rc_df['date_ws_rc'] = date_ws_rc[0]
windSpeed_rc_df['time_ws_rc'] = time_ws_rc[0]
# Combine date and time with a space in between the two
windSpeed_rc_df['date_time_ws_rc'] = windSpeed_rc_df['date_ws_rc'] + ' ' + windSpeed_rc_df['time_ws_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
windSpeed_rc_df['date_time_ws_rc'] = pd.to_datetime(windSpeed_rc_df['date_time_ws_rc'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_rc_df['date_ws_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_rc_df['times_ws_rc'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_rc_df = windSpeed_rc_df.loc[windSpeed_rc_df['date_ws_rc'] < windSpeed_rc_df['times_ws_rc']]
windSpeed_rc_df
# windSpeed_rc_df.dtypes
# =================== Wind Gust Data ======================
wind_gust_rc = []
for i in data_rc_forecast["properties"]["windGust"]["values"]:
wind_gust_rc.append(i)
wind_gust_rc_df = pd.DataFrame(wind_gust_rc)
wind_gust_rc_df
# Converting KM/hour to Miles/hour
wind_gust_rc_df['m/h_rc'] = wind_gust_rc_df['value'] * 0.621371
wind_gust_rc_df
# # validTime Column split to date and time for wind Gusts
date_wg_rc = wind_gust_rc_df['validTime'].str.split('T', n=1, expand=True)
time_wg_rc = date_wg_rc[1].str.split('+', n=1, expand=True)
time_wg_rc
wind_gust_rc_df['date_wg_rc'] = date_wg_rc[0]
wind_gust_rc_df['time_wg_rc'] = time_wg_rc[0]
# Combine date and time with a space in between the two
wind_gust_rc_df['date_time_wg_rc'] = wind_gust_rc_df['date_wg_rc'] + ' ' + wind_gust_rc_df['time_wg_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
wind_gust_rc_df['date_time_wg_rc'] = pd.to_datetime(wind_gust_rc_df['date_time_wg_rc'])
wind_gust_rc_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_rc_df['date_wg_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_rc_df['times_wg_rc'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_rc_df = wind_gust_rc_df.loc[wind_gust_rc_df['date_wg_rc'] < wind_gust_rc_df['times_wg_rc']]
wind_gust_rc_df
# wind_gust_rc_df.dtypes
# =================== Probability of Precipitation ======================
prob_precip_rc = []
for i in data_rc_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip_rc.append(i)
prob_precip_rc_df = pd.DataFrame(prob_precip_rc)
prob_precip_rc_df
# # validTime Column split to date and time for Probability Precipitation
date_pp_rc = prob_precip_rc_df['validTime'].str.split('T', n=1, expand=True)
time_pp_rc = date_pp_rc[1].str.split('+', n=1, expand=True)
time_pp_rc
prob_precip_rc_df['date_pp_rc'] = date_pp_rc[0]
prob_precip_rc_df['time_pp_rc'] = time_pp_rc[0]
# Combine date and time with a space in between the two
prob_precip_rc_df['date_time_pp_rc'] = prob_precip_rc_df['date_pp_rc'] + ' ' + prob_precip_rc_df['time_pp_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
prob_precip_rc_df['date_time_pp_rc'] = pd.to_datetime(prob_precip_rc_df['date_time_pp_rc'])
prob_precip_rc_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_rc_df['date_pp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_rc_df['times_pp_rc'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_rc_df = prob_precip_rc_df.loc[prob_precip_rc_df['date_pp_rc'] < prob_precip_rc_df['times_pp_rc']]
prob_precip_rc_df
# prob_precip_rc_df.dtypes
# =================== Quantity of Precipitataion ======================
qty_precip_rc = []
for i in data_rc_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip_rc.append(i)
qty_precip_rc_df = pd.DataFrame(qty_precip_rc)
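# The blocks above repeat the same steps (split validTime into date and time, build a
# datetime, keep today + 3 days). A minimal helper sketch of that pattern is shown
# below; the function name and 'suffix' argument are illustrative, and it assumes the
# same pandas/datetime/timedelta imports used by the code above.
def filter_next_four_days(values, suffix):
    df = pd.DataFrame(values)
    parts = df['validTime'].str.split('T', n=1, expand=True)
    df['date_' + suffix] = parts[0]
    df['time_' + suffix] = parts[1].str.split('+', n=1, expand=True)[0]
    df['date_time_' + suffix] = pd.to_datetime(df['date_' + suffix] + ' ' + df['time_' + suffix])
    cutoff = datetime.datetime.strptime(df['date_' + suffix][0], "%Y-%m-%d") + timedelta(days=4)
    return df.loc[df['date_' + suffix] < cutoff.strftime("%Y-%m-%d")]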
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:37:53 2019
@author: sdenaro
"""
import pandas as pd
import numpy as np
def setup(year,operating_horizon,perfect_foresight):
#read generator parameters into DataFrame
df_gen = pd.read_csv('PNW_data_file/generators.csv',header=0)
zone = ['PNW']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zone]
df_load = df_load.loc[year*8760:year*8760+8759,:]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
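    # The loop above sets hourly operating reserves to 4% of total zonal load; an
    # equivalent vectorized form would be: reserves = rv.sum(axis=1, keepdims=True) * 0.04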
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/PNW_dispatchable_hydro.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'PNW']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
##time series solar for each TAC
df_solar = pd.read_csv('PNW_data_file/solar.csv',header=0)
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/PNW_dispatchable_imports.csv',header=0)
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_imports3 = pd.read_csv('Path_setup/PNW_dispatchable_3.csv',header=0)
    df_imports8 = pd.read_csv('Path_setup/PNW_dispatchable_8.csv',header=0)
import os, sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
import pandas as pd
import cv2
# torch libs
import torch
from torch.autograd import Variable
from torch.nn.parallel.data_parallel import data_parallel
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import SequentialSampler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from common import RESULTS_DIR, IDENTIFIER, SEED, PROJECT_PATH
import matplotlib.pyplot as plt
from utility.file import Logger
from net.resnet50_mask_rcnn.configuration import Configuration
from net.resnet50_mask_rcnn.draw import draw_multi_proposal_metric, draw_mask_metric, image_show
from net.resnet50_mask_rcnn.model import MaskRcnnNet
from net.metric import compute_average_precision_for_mask, compute_precision_for_box, HIT
from dataset.reader import ScienceDataset, multi_mask_to_annotation, instance_to_multi_mask, \
multi_mask_to_contour_overlay, multi_mask_to_color_overlay
from dataset.transform import pad_to_factor
class Evaluator(object):
def __init__(self):
self.OUT_DIR = RESULTS_DIR + '/mask-rcnn-50-gray500-02'
self.OVERLAYS_DIR = self.OUT_DIR + '/evaluate/overlays'
self.STATS_DIR = self.OUT_DIR + '/evaluate/stats'
self.logger = Logger()
## setup ---------------------------
os.makedirs(self.OVERLAYS_DIR, exist_ok=True)
os.makedirs(self.STATS_DIR, exist_ok=True)
os.makedirs(self.OUT_DIR + '/evaluate/npys', exist_ok=True)
os.makedirs(self.OUT_DIR + '/checkpoint', exist_ok=True)
os.makedirs(self.OUT_DIR + '/backup', exist_ok=True)
logger = self.logger
logger.open(self.OUT_DIR + '/log.evaluate.txt', mode='a')
logger.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
logger.write('** some experiment setting **\n')
logger.write('\tSEED = %u\n' % SEED)
logger.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
logger.write('\tOUT_DIR = %s\n' % self.OUT_DIR)
logger.write('\n')
## dataset ----------------------------------------
logger.write('** dataset setting **\n')
self.test_dataset = ScienceDataset(
'train1_ids_gray2_500',
# 'valid1_ids_gray2_43',
mode='train',
#'debug1_ids_gray2_10', mode='train',
transform=self._eval_augment)
self.test_loader = DataLoader(
self.test_dataset,
sampler=SequentialSampler(self.test_dataset),
batch_size=1,
drop_last=False,
num_workers=4,
pin_memory=True,
collate_fn=self._eval_collate)
logger.write('\ttest_dataset.split = %s\n' % (self.test_dataset.split))
logger.write('\tlen(self.test_dataset) = %d\n' % (len(self.test_dataset)))
logger.write('\n')
    def _revert(self, results, images):
"""Reverts test-time-augmentation (e.g., unpad, scale back to input image size, etc).
"""
assert len(results) == len(images), 'Results and images should be the same length'
batch_size = len(images)
for index_in_batch in range(batch_size):
result = results[index_in_batch]
image = images[index_in_batch]
height, width = image.shape[:2]
result.multi_mask = result.multi_mask[:height, :width]
for bounding_box in result.bounding_boxes:
x0, y0, x1, y1 = bounding_box.coordinates
                # clip each coordinate element-wise to the un-padded image extent
                x0, x1 = min(x0, height), min(x1, height)
                y0, y1 = min(y0, width), min(y1, width)
bounding_box.coordinates = (x0, y0, x1, y1)
def _eval_augment(self, image, multi_mask, meta, index):
pad_image = pad_to_factor(image, factor=16)
input = torch.from_numpy(pad_image.transpose((2, 0, 1))).float().div(255)
boxes_coordinates, label, instance = multi_mask_to_annotation(multi_mask)
return input, boxes_coordinates, label, instance, meta, image, index
def _eval_collate(self, batch):
batch_size = len(batch)
#for index_in_batch in range(batch_size): print (batch[index_in_batch][0].size())
inputs = torch.stack([batch[index_in_batch][0] for index_in_batch in range(batch_size)], 0)
boxes = [batch[index_in_batch][1] for index_in_batch in range(batch_size)]
labels = [batch[index_in_batch][2] for index_in_batch in range(batch_size)]
instances = [batch[index_in_batch][3] for index_in_batch in range(batch_size)]
metas = [batch[index_in_batch][4] for index_in_batch in range(batch_size)]
images = [batch[index_in_batch][5] for index_in_batch in range(batch_size)]
indices = [batch[index_in_batch][6] for index_in_batch in range(batch_size)]
return [inputs, boxes, labels, instances, metas, images, indices]
def _save_prediction_png(self, name: str, mask, proposal_boxes, truth_box, truth_label,
truth_instance, image):
cfg = self.cfg
contour_overlay = multi_mask_to_contour_overlay(mask, image=image, color=[0, 255, 0])
color_overlay = multi_mask_to_color_overlay(mask, color='summer')
color_overlay_with_contour = multi_mask_to_contour_overlay(
mask, image=color_overlay, color=[255, 255, 255])
all1 = np.hstack((image, contour_overlay, color_overlay_with_contour))
all6 = draw_multi_proposal_metric(cfg, image, proposal_boxes, truth_box, truth_label,
[0, 255, 255], [255, 0, 255], [255, 255, 0])
all7 = draw_mask_metric(cfg, image, mask, truth_box, truth_label, truth_instance)
cv2.imwrite('{}/{}_all1.png'.format(self.OVERLAYS_DIR, name), all1)
cv2.imwrite('{}/{}_all6.png'.format(self.OVERLAYS_DIR, name), all6)
cv2.imwrite('{}/{}_all7.png'.format(self.OVERLAYS_DIR, name), all7)
def _append_hit_and_miss_stats(self, name: str, truth_boxes, truth_box_results, thresholds,
bb_results: pd.DataFrame):
"""Checks which ground thruth boxes are hit and which are missed for each threshold level.
Populates results dataframe.
Args:
truth_boxes: an array of ground truth boxes.
thruth_box_results: an list of results for each threshold. Each result is an array of
truth_boxes length. Each element of the array is whether HIT or not.
thresholds: threshold levels for IoU.
bb_results: bounding boxes results.
"""
for threshold_index, threshold in enumerate(thresholds):
for boxes_coordinates, result in zip(truth_boxes, truth_box_results[threshold_index]):
x0, y0, x1, y1 = boxes_coordinates.astype(np.int32)
w, h = (x1 - x0, y1 - y0)
bb_results.loc[bb_results.shape[0]] = {
'id': name,
'w': w,
'h': h,
'threshold': threshold,
'is_hit': result == HIT
}
def _append_results_stats(self, name: str, box_precision, thresholds, mask_average_precision,
overall_results):
"""Appends overall precision results to the results dataframe.
"""
results_row = {'id': name, 'mask_average_precision': mask_average_precision}
for threshold_index, threshold in enumerate(thresholds):
results_row['box_precision_{}'.format(int(
threshold * 100))] = box_precision[threshold_index]
overall_results.loc[overall_results.shape[0]] = results_row
def run_evaluate(self, model_checkpoint):
self.cfg = Configuration()
logger = self.logger
thresholds = [0.5, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
# TODO(alexander): Populate this.
overall_results_columns = ['id', 'mask_average_precision']
for threshold in thresholds:
overall_results_columns.append('box_precision_{}'.format(int(threshold * 100)))
        overall_results = pd.DataFrame(columns=overall_results_columns)
import os
import shutil
import sys
from collections import namedtuple
from datetime import datetime, date
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
BASE_DIR = os.getenv('ETL_HOME', '/home/etl')
HeaderMapping = namedtuple('HeaderMapping', ['raw_name', 'output_name'])
LOCATION_ID_HEADER = 'location_id'
DATE_ID_HEADER = 'date_id'
CITY_HEADER = 'city'
STATE_HEADER = 'state'
COUNTRY_HEADER = 'country'
COMBINED_HEADER = 'combined_key'
FIPS_HEADER = 'FIPS'
UID_HEADER = 'UID'
LATITUDE_HEADER = 'latitude'
LONGITUDE_HEADER = 'longitude'
CASES_HEADER = 'cases'
DEATHS_HEADER = 'deaths'
RECOVERIES_HEADER = 'recoveries'
# Tests performed per 100,000 people
TESTING_RATE_HEADER = 'testing_rate'
# hospitalized / number cases
HOSPITALIZATION_RATE_HEADER = 'hospitalization_rate'
# Cases per 100,000 people
CASES_100K_HEADER = 'cases_100k'
DATE_HEADER = 'date'
POPULATION_HEADER = 'population'
state_v1 = HeaderMapping('Province/State', STATE_HEADER)
state_v2 = HeaderMapping('Province_State', STATE_HEADER)
country_v1 = HeaderMapping('Country/Region', COUNTRY_HEADER)
country_v2 = HeaderMapping('Country_Region', COUNTRY_HEADER)
cases_v1 = HeaderMapping('Confirmed', CASES_HEADER)
deaths_v1 = HeaderMapping('Deaths', DEATHS_HEADER)
recoveries_v1 = HeaderMapping('Recovered', RECOVERIES_HEADER)
testing_rate_v1 = HeaderMapping('Testing_Rate', TESTING_RATE_HEADER)
hospitalization_rate_v1 = HeaderMapping('Hospitalization_Rate', HOSPITALIZATION_RATE_HEADER)
cases_100K_v1 = HeaderMapping('Incidence_Rate', CASES_100K_HEADER)
cases_100K_v2 = HeaderMapping('Incident_Rate', CASES_100K_HEADER)
latitude_v1 = HeaderMapping('Latitude', LATITUDE_HEADER)
latitude_v2 = HeaderMapping('Lat', LATITUDE_HEADER)
longitude_v1 = HeaderMapping('Longitude', LONGITUDE_HEADER)
longitude_v2 = HeaderMapping('Long_', LONGITUDE_HEADER)
known_headers = [
'Province/State,Country/Region,Last Update,Confirmed,Deaths,Recovered',
'Province/State,Country/Region,Last Update,Confirmed,Deaths,Recovered,Latitude,Longitude',
'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key',
'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key,Incidence_Rate,Case-Fatality_Ratio',
'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key,Incident_Rate,Case_Fatality_Ratio',
'Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,FIPS,Incident_Rate,People_Tested,People_Hospitalized,Mortality_Rate,UID,ISO3,Testing_Rate,Hospitalization_Rate',
'Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,FIPS,Incident_Rate,Total_Test_Results,People_Hospitalized,Case_Fatality_Ratio,UID,ISO3,Testing_Rate,Hospitalization_Rate'
]
header_transformation_mappings = [
state_v1,
state_v2,
country_v1,
country_v2,
cases_v1 ,
deaths_v1,
recoveries_v1,
testing_rate_v1,
hospitalization_rate_v1,
cases_100K_v1,
cases_100K_v2,
latitude_v1,
latitude_v2,
longitude_v1,
longitude_v2,
]
required_headers = [
CITY_HEADER,
STATE_HEADER,
COUNTRY_HEADER,
LATITUDE_HEADER,
LONGITUDE_HEADER,
CASES_HEADER,
DEATHS_HEADER,
RECOVERIES_HEADER,
TESTING_RATE_HEADER,
HOSPITALIZATION_RATE_HEADER,
CASES_100K_HEADER,
]
load_headers = [
DATE_ID_HEADER,
LOCATION_ID_HEADER,
CASES_HEADER,
RECOVERIES_HEADER,
DEATHS_HEADER,
CASES_100K_HEADER,
TESTING_RATE_HEADER,
HOSPITALIZATION_RATE_HEADER,
]
def remap_header_name(header_name):
'''Given any string, match it to an instance of HeaderMapping
and return the output_name of the match or the original
header_name if no match it found.
Args:
header_name (str): raw input header name to transform
Returns:
A header name that underwent transformation
'''
for hm in header_transformation_mappings:
if header_name == hm.raw_name:
return hm.output_name
return header_name
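# A minimal usage sketch (sample header names drawn from the mappings above; unmapped
# names such as 'Last_Update' pass through unchanged):
def _demo_remap_header_name():
    for raw in ('Province_State', 'Country/Region', 'Incident_Rate', 'Last_Update'):
        print(raw, '->', remap_header_name(raw))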
def transform_headers(df, ds):
'''Takes a Pandas DataFrame, validates the headers are from an
an expected list of headers the ETL pipeline knows apriori and
remaps the names of the headers to a uniform set of names and order
Args:
df (DataFrame): input DataFrame to transform
ds (str or datetime): the date stamp the DataFrame contains data for
Returns:
A transformed DataFrame with uniform header names and order
'''
if isinstance(ds, str):
ds = datetime.strptime(ds, '%m-%d-%Y')
elif not isinstance(ds, datetime):
raise TypeError('ds argument is expected to be either a datetime instance or str representing one')
transformed_df = df.rename(columns=remap_header_name)
keep_columns = [col for col in transformed_df.columns
if col in required_headers]
add_columns = [col for col in required_headers
if col not in keep_columns]
transformed_df = transformed_df[keep_columns]
for col in add_columns:
transformed_df[col] = np.nan
transformed_df[DATE_HEADER] = ds
expected_order = [DATE_HEADER] + required_headers
transformed_df = transformed_df[expected_order]
if 'Combined_Key' not in df.columns:
combined_key_rows = []
for idx, row in transformed_df.iterrows():
combined = ''
if not pd.isnull(row.city) and row.city:
combined += row.city + ', '
if not pd.isnull(row.state) and row.state and row.state != row.country:
combined += row.state + ', '
if not pd.isnull(row.country) and row.country:
combined += row.country
combined_key_rows.append(combined)
transformed_df[COMBINED_HEADER] = combined_key_rows
else:
transformed_df[COMBINED_HEADER] = df.Combined_Key
transformed_df[COMBINED_HEADER] = transformed_df[COMBINED_HEADER].str.lower()
if 'FIPS' not in df.columns:
transformed_df[FIPS_HEADER] = np.nan
else:
transformed_df[FIPS_HEADER] = df.FIPS
if 'UID' not in df.columns:
transformed_df[UID_HEADER] = np.nan
else:
transformed_df[UID_HEADER] = df.UID
return transformed_df
COVID_DATA_START_DATE = date(2020, 1, 22)
def make_date_dims(start, end=None):
date_range = pd.date_range(start=start,
end=end or date.today(),
freq='1D')
data = {
'date_id': list(range(1, len(date_range)+1)),
'date': date_range.date,
'year': date_range.year,
'month': date_range.month,
'day_of_month': date_range.day,
'day_of_week': date_range.weekday
}
return pd.DataFrame(data, index=date_range)
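# A minimal usage sketch: build the date dimension for a one-week range to show the
# columns produced (date_id starts at 1; day_of_week is 0 for Monday).
def _demo_make_date_dims():
    print(make_date_dims(start=date(2020, 1, 22), end=date(2020, 1, 28)))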
def parse_city_from_combined_key(key):
parts = key.split(',')
if len(parts) == 3:
return parts[0]
return None
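# A minimal usage sketch: the city is only present when the combined key has three parts.
def _demo_parse_city_from_combined_key():
    print(parse_city_from_combined_key('Los Angeles, California, US'))  # -> 'Los Angeles'
    print(parse_city_from_combined_key('California, US'))               # -> None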
def transform_global():
INPUT_DATA_DIR = os.path.join(BASE_DIR,
'COVID-19',
'csse_covid_19_data',
'csse_covid_19_daily_reports')
print("Input Dir: " + INPUT_DATA_DIR)
TRANSFORMED_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19-TRANSFORMED')
if os.path.exists(TRANSFORMED_DATA_DIR):
shutil.rmtree(TRANSFORMED_DATA_DIR)
os.makedirs(TRANSFORMED_DATA_DIR)
print("Output Dir: " + TRANSFORMED_DATA_DIR)
# Fix any BOM files (there are some early on ones in Jan 2020, could be more later)
input_files = [f for f in os.listdir(INPUT_DATA_DIR) if f.endswith('.csv')]
for f in input_files:
input_f = os.path.join(INPUT_DATA_DIR, f)
output_f = os.path.join(TRANSFORMED_DATA_DIR, 'global_'+f)
with open(input_f, mode='r', encoding='utf-8-sig') as fin, open(output_f, mode='w', encoding='utf-8') as fout:
fout.write(fin.read())
# remap headers to consistent format
files = [f for f in os.listdir(TRANSFORMED_DATA_DIR) if f.startswith('global_')]
for f in files:
fname, fext = os.path.splitext(f)
date_str = fname.replace('global_', '')
file_path = os.path.join(TRANSFORMED_DATA_DIR, f)
with open(file_path) as fp:
headers = fp.readline().strip()
if headers not in known_headers:
print("{} has unrecognized headers {}".format(f, headers))
sys.exit(1)
print('Transforming {}'.format(f))
df = pd.read_csv(file_path)
transformed_df = transform_headers(df, date_str)
transformed_path = os.path.join(TRANSFORMED_DATA_DIR, 'transformed_'+date_str+'.csv')
transformed_df.to_csv(transformed_path, index=False)
def transform_us():
INPUT_DATA_DIR = os.path.join(BASE_DIR,
'COVID-19',
'csse_covid_19_data',
'csse_covid_19_daily_reports_us')
print("Input Dir: " + INPUT_DATA_DIR)
TRANSFORMED_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19-TRANSFORMED')
if not os.path.exists(TRANSFORMED_DATA_DIR):
os.makedirs(TRANSFORMED_DATA_DIR)
print("Output Dir: " + TRANSFORMED_DATA_DIR)
# Fix any BOM files (there are some early on ones in Jan 2020, could be more later)
input_files = [f for f in os.listdir(INPUT_DATA_DIR) if f.endswith('.csv')]
for f in input_files:
input_f = os.path.join(INPUT_DATA_DIR, f)
output_f = os.path.join(TRANSFORMED_DATA_DIR, 'us_'+f)
with open(input_f, mode='r', encoding='utf-8-sig') as fin, open(output_f, mode='w', encoding='utf-8') as fout:
fout.write(fin.read())
# remap headers to consistent format
files = [f for f in os.listdir(TRANSFORMED_DATA_DIR) if f.startswith('us_')]
for f in files:
fname, fext = os.path.splitext(f)
date_str = fname.replace('us_', '')
file_path = os.path.join(TRANSFORMED_DATA_DIR, f)
with open(file_path) as fp:
headers = fp.readline().strip()
df = pd.read_csv(file_path)
if headers not in known_headers:
print("{} has unrecognized headers {}".format(f, headers))
df.head()
sys.exit(1)
print('Transforming {}'.format(f))
transformed_df = transform_headers(df, date_str)
transformed_path = os.path.join(TRANSFORMED_DATA_DIR, 'transformed_'+date_str+'.csv')
if os.path.exists(transformed_path):
global_df = pd.read_csv(transformed_path)
# for country in transformed_df.country.unique():
# global_df = global_df.loc[global_df.country != country]
transformed_df = pd.concat([transformed_df, global_df]).drop_duplicates()
transformed_df.to_csv(transformed_path, index=False)
COVID_TMP_FACTS_TBL = 'tmp_covid_facts'
COVID_DATE_DIM_TBL = 'date_dim'
COVID_LOCATION_DIM_TBL = 'location_dim'
def create_sql_engine():
return create_engine('postgresql://etl:etl@localhost:5432/dw')
def validate_location_sql_entry(row):
values = (
row.location_id,
row.country,
row.state if pd.notnull(row.state) else None,
row.city if pd.notnull(row.city) else None,
row.latitude,
row.longitude,
int(row.population) if pd.notnull(row.population) else None
)
return values
def validate_covid_facts_sql_entry(row):
values = (
int(row.date_id),
int(row.location_id),
int(row.cases) if pd.notnull(row.cases) else None,
int(row.recoveries) if pd.notnull(row.recoveries) else None,
int(row.deaths) if pd.notnull(row.deaths) else None,
row.cases_100k if pd.notnull(row.cases_100k) else None,
row.testing_rate if pd.notnull(row.testing_rate) else None,
row.hospitalization_rate if pd.notnull(row.hospitalization_rate) else None
)
return values
def regenerate_tmp_covid_facts_table():
conn = None
try:
engine = create_sql_engine()
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute('DROP TABLE tmp_covid_facts')
cur.execute('CALL create_covid_facts_tables()')
cur.close()
except Exception as e:
print('Failed to regenerate tmp_covid_facts table', e)
finally:
if conn:
conn.close()
def load_covid_facts_from_tmp():
conn = None
try:
engine = create_sql_engine()
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute('CALL refresh_from_tmp_facts()')
conn.commit()
cur.close()
except Exception as e:
print('Failed to load covid_facts from tmp_covid_facts table', e)
finally:
if conn:
conn.close()
def load():
LOCATIONS_PATH = os.path.join(BASE_DIR,
'COVID-19',
'csse_covid_19_data',
'UID_ISO_FIPS_LookUp_Table.csv')
print("Locations File: " + LOCATIONS_PATH)
TRANSFORMED_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19-TRANSFORMED')
print("Output Dir: " + TRANSFORMED_DATA_DIR)
locations_df = pd.read_csv(LOCATIONS_PATH)
lower_combined_key = locations_df.Combined_Key.str.lower().values
locations_df = locations_df.set_index(lower_combined_key)
locations_df['city'] = locations_df.Combined_Key.apply(parse_city_from_combined_key)
locations_dim_df = locations_df.rename(columns={
'UID': LOCATION_ID_HEADER,
'Province_State': STATE_HEADER,
'Country_Region': COUNTRY_HEADER,
'Lat': LATITUDE_HEADER,
'Long_': LONGITUDE_HEADER,
'Population': POPULATION_HEADER
})
keep_columns = [
LOCATION_ID_HEADER,
COUNTRY_HEADER,
STATE_HEADER,
CITY_HEADER,
LATITUDE_HEADER,
LONGITUDE_HEADER,
POPULATION_HEADER
]
locations_dim_df = locations_dim_df[keep_columns]
locations_dim_df.to_csv(os.path.join(TRANSFORMED_DATA_DIR, 'loadable_locations.csv'),
index=False)
locations_sql = '\n'.join([
'INSERT INTO location_dim ({})'.format(','.join(keep_columns)),
'VALUES (%s, %s, %s, %s, %s, %s, %s)',
'ON CONFLICT (location_id) DO NOTHING'
])
conn = None
try:
engine = create_sql_engine()
conn = engine.raw_connection()
cur = conn.cursor()
for idx, row in locations_dim_df.iterrows():
cur.execute(locations_sql, validate_location_sql_entry(row))
conn.commit()
cur.close()
except Exception as e:
print('Failed to insert locations', e)
finally:
if conn:
conn.close()
date_df = make_date_dims(COVID_DATA_START_DATE)
date_df.to_csv(os.path.join(TRANSFORMED_DATA_DIR, 'loadable_dates.csv'),
index=False)
date_sql = '\n'.join([
'INSERT INTO date_dim (date_id, date, year, month, day_of_month, day_of_week)',
'VALUES (%s, %s, %s, %s, %s, %s)',
'ON CONFLICT (date_id) DO NOTHING'
])
date_values = [row.values.tolist() for idx, row in date_df.iterrows()]
conn = None
try:
engine = create_sql_engine()
conn = engine.raw_connection()
cur = conn.cursor()
cur.executemany(date_sql, date_values)
conn.commit()
cur.close()
except Exception as e:
print('Failed to insert dates', e)
finally:
if conn:
conn.close()
files = [f for f in os.listdir(TRANSFORMED_DATA_DIR)
if f.startswith('transformed_')]
regenerate_tmp_covid_facts_table()
for f in files:
file_path = os.path.join(TRANSFORMED_DATA_DIR, f)
        df = pd.read_csv(file_path)
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
edata = pd.read_csv("docword.enron.txt", skiprows=3, sep = ' ', header=None)
evocab = pd.read_csv("vocab.enron.txt", header=None)
print (evocab)
evocab.columns = ['word']
edata.columns = ['docid','wordid','freq']
# Taking a sample data set
edata = edata.iloc[:100,:]
evocab.index = evocab.index + 1
wc = edata.groupby('wordid')['freq'].sum()
#wc = egrouped['freq'].agg(np.sum)
print (wc)
in_list = wc
# In[ ]:
m = 1
# Input parameter 'm'
# while(True):
# m = input("Enter no. of randomized iterations required: ")
# try:
# m = int(m)
# break
# except:
# print("Enter valid number.")
# continue
# Calculating the dissimilarities and smoothing factors.
# Set the value of parameter m = the no. of iterations you require
Card = pd.Series(np.NAN)
DS = pd.Series(np.NAN)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 25 12:57:48 2020
@authors: <NAME>
Last modified: 2020-02-19
--------------------------------------------------
** Semantic Search Analysis: Integrate **
--------------------------------------------------
** CHANGE FILE NAMING BEFORE RUNNING!! **
This script joins the new data to your old data (if any). For the file
TaggedLogAllMonths.xlsx, the script updates total search counts for existing
query rows and adds new rows for new queries, for the Tableau discovery UI.
The file BiggestMovers.xlsx follows the same procedure for row addition;
however, a new column is appended for every month of data to allow for time
series analysis over months.
After you have your second month of data, dupe off dataProcessed + 'taggedLog' + TimePeriod + '.xlsx'
to dataProcessed + 'TaggedLogAllMonths.xlsx'.
INPUTS:
- data/processed/taggedLog-[TimePeriod].xlsx - Importable into Tableau, etc. but also summaries for Excel users
- dataProcessed/'BiggestMovers' + TimePeriod + '.xlsx' - For PreferredTerm trends
OUTPUTS:
- reports/TaggedLogAllMonths.xlsx - Summarize for discovery UI
- data/processed/BiggestMovers.xlsx - Summarize for time series; study
trends across time.
----------------
SCRIPT CONTENTS
----------------
1. Start-up / What to put into place, where
2. Append columns to TaggedLogAllMonths.xlsx
3. Append columns to BiggestMoversAllMonths.xlsx
4. Create summary infographic
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import os
from datetime import datetime, timedelta
# import random
from scipy.optimize import basinhopping, differential_evolution
from pathlib import *
# To be used with str(Path.home())
# Set working directory and directories for read/write
home_folder = str(Path.home()) # os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataRaw = 'data/raw/' # Put log here before running script
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
dataInterim = 'data/interim/' # Save to disk as desired, to re-start easily
dataProcessed = 'data/processed/' # Ready to visualize
reports = 'reports/'
#%%
# =============================================
# 2. Append columns to TaggedLogAllMonths.xlsx
# =============================================
'''
You'll need to build this based on what you've already done.
After processing the second month, you can create TaggedLogAllMonths.
In this example code, 3 months were processed before aggregating.
'''
# Bring in new tagged log
taggedLog201910 = pd.read_excel(dataProcessed + 'taggedLog201910.xlsx')
taggedLog201911 = pd.read_excel(dataProcessed + 'taggedLog201911.xlsx')
taggedLog201912 = pd.read_excel(dataProcessed + 'taggedLog201912.xlsx')
# Join
TaggedMerge1 = pd.merge(taggedLog201910, taggedLog201911, how='outer', on=['Query', 'AdjustedQueryTerm', 'SemanticType', 'SemanticGroup', 'LocationOfSearch'])
TaggedMerge1.columns
'''
'Query', 'AdjustedQueryTerm_x', 'TotalSearchFreq201910',
'TotalUniqueSearches_x', 'SemanticGroup', 'SemanticType',
'PreferredTerm_x', 'LocationOfSearch_x', 'Impressions_x', 'Clicks_x',
'CTR_x', 'AveragePosition_x', 'ResultsPVSearch_x',
'PercentSearchExits_x', 'PercentSearchRefinements_x',
'TimeAfterSearch_x', 'AvgSearchDepth_x', 'ui_x', 'CustomTag1_x',
'CustomTag2_x', 'AdjustedQueryTerm_y', 'TotalSearchFreq201911',
'TotalUniqueSearches_y', 'PreferredTerm_y', 'LocationOfSearch_y',
'Impressions_y', 'Clicks_y', 'CTR_y', 'AveragePosition_y',
'ResultsPVSearch_y', 'PercentSearchExits_y',
'PercentSearchRefinements_y', 'TimeAfterSearch_y', 'AvgSearchDepth_y',
'ui_y', 'CustomTag1_y', 'CustomTag2_y'
'''
# Join the last
TaggedMerge2 = pd.merge(TaggedMerge1, taggedLog201912, how='outer', on=['Query', 'AdjustedQueryTerm', 'SemanticType', 'SemanticGroup', 'LocationOfSearch'])
TaggedMerge2.columns
'''
'Query', 'AdjustedQueryTerm', 'TotalSearchFreq201910',
'TotalUniqueSearches_x', 'SemanticGroup', 'SemanticType',
'PreferredTerm_x', 'LocationOfSearch', 'Impressions_x', 'Clicks_x',
'CTR_x', 'AveragePosition_x', 'ResultsPVSearch_x',
'PercentSearchExits_x', 'PercentSearchRefinements_x',
'TimeAfterSearch_x', 'AvgSearchDepth_x', 'ui_x', 'CustomTag1_x',
'CustomTag2_x', 'TotalSearchFreq201911', 'TotalUniqueSearches_y',
'PreferredTerm_y', 'Impressions_y', 'Clicks_y', 'CTR_y',
'AveragePosition_y', 'ResultsPVSearch_y', 'PercentSearchExits_y',
'PercentSearchRefinements_y', 'TimeAfterSearch_y', 'AvgSearchDepth_y',
'ui_y', 'CustomTag1_y', 'CustomTag2_y', 'TotalSearchFreq201912',
'TotalUniqueSearches', 'PreferredTerm', 'Impressions', 'Clicks', 'CTR',
'AveragePosition', 'ResultsPVSearch', 'PercentSearchExits',
'PercentSearchRefinements', 'TimeAfterSearch', 'AvgSearchDepth', 'ui',
'CustomTag1', 'CustomTag2'
'''
# Reduce and reorder
# If you changed the matchFiles over months, _x and _y may not be the same, and in that case
# you might want to reconcile them. Here the unneeded cols are dropped
TaggedMergeCleanup = TaggedMerge2[['Query', 'AdjustedQueryTerm',
'PreferredTerm', 'PreferredTerm_x', 'PreferredTerm_y',
'SemanticType', 'SemanticGroup',
'TotalSearchFreq201910', 'TotalSearchFreq201911', 'TotalSearchFreq201912',
'CustomTag1', # 'CustomTag1_x', 'CustomTag1_y',
'CustomTag2', # 'CustomTag2_x', 'CustomTag2_y',
# 'CustomTag3', # 'CustomTag3_x', 'CustomTag3_y',
'LocationOfSearch']]
'''
prefNull = TaggedMergeCleanup['PreferredTerm'].isnull().sum() # 144960
prefXNull = TaggedMergeCleanup['PreferredTerm_x'].isnull().sum() # 132719
prefYNull = TaggedMergeCleanup['PreferredTerm_y'].isnull().sum() # 135750
'''
# FIXME - Same routine as other scripts use after merge, but this time to level-set PreferredTerm
TaggedMergeCleanup['PreferredTerm2'] = TaggedMergeCleanup['PreferredTerm_x'].where(TaggedMergeCleanup['PreferredTerm_x'].notnull(), TaggedMergeCleanup['PreferredTerm_y'])
TaggedMergeCleanup['PreferredTerm2'] = TaggedMergeCleanup['PreferredTerm_y'].where(TaggedMergeCleanup['PreferredTerm_y'].notnull(), TaggedMergeCleanup['PreferredTerm_x'])
# If it's still null, copy in AdjustedQueryTerm
TaggedMergeCleanup['PreferredTerm2'] = TaggedMergeCleanup['AdjustedQueryTerm'].where(TaggedMergeCleanup['PreferredTerm2'].isnull(), TaggedMergeCleanup['PreferredTerm2'])
# How many null in PreferredTerm?
# prefNull = TaggedMergeCleanup['PreferredTerm'].isnull().sum() # 144960
# prefNull
# Clean up
TaggedMergeCleanup.drop(['PreferredTerm', 'PreferredTerm_x', 'PreferredTerm_y'], axis=1, inplace=True)
TaggedMergeCleanup.rename(columns={'PreferredTerm2': 'PreferredTerm'}, inplace=True)
TaggedMergeCleanup.columns
'''
'Query', 'AdjustedQueryTerm', 'SemanticType', 'SemanticGroup',
'TotalSearchFreq201910', 'TotalSearchFreq201911',
'TotalSearchFreq201912', 'CustomTag1', 'CustomTag2', 'LocationOfSearch',
'PreferredTerm'
'''
# Total month counts
TaggedMergeCleanup.fillna({'TotalSearchFreq201910': 0,
'TotalSearchFreq201911': 0,
'TotalSearchFreq201912': 0}, inplace = True)
# New col
TaggedMergeCleanup['TotalSearchFreq'] = ''
TaggedMergeCleanup['TotalSearchFreq'] = TaggedMergeCleanup.TotalSearchFreq201910 + TaggedMergeCleanup.TotalSearchFreq201911 + TaggedMergeCleanup.TotalSearchFreq201912
# Sort
TaggedMergeCleanup = TaggedMergeCleanup.sort_values(by=['TotalSearchFreq', 'Query'], ascending=[False, True])
TaggedMergeCleanup.reset_index(inplace=True)
# View a sample
# Top = TaggedMergeCleanup.head(50)
# TopAndBottom = Top.append(TaggedMergeCleanup.tail(50))
# Reorder again
TaggedMergeCleanup = TaggedMergeCleanup[['TotalSearchFreq', 'Query', 'AdjustedQueryTerm', 'PreferredTerm',
'SemanticType', 'SemanticGroup', 'TotalSearchFreq201910',
'TotalSearchFreq201911', 'TotalSearchFreq201912',
'CustomTag1', 'CustomTag2', 'LocationOfSearch']]
# Write out
writer = pd.ExcelWriter(reports + 'TaggedLogAllMonths.xlsx')
TaggedMergeCleanup.to_excel(writer,'TaggedLogAllMonths', index=False)
writer.save()
#%%
# =================================================
# 3. Append columns to BiggestMoversAllMonths.xlsx
# =================================================
'''
BREAKS THE SCRIPT IF YOU DON'T HAVE MULTIPLE FILES WRITTEN OUT ALREADY
Creating a multi-month analysis of trends by PreferredTerm, is recommended,
but this will be commented out, to avoid auto-run errors. Update the file
names every time you run. The pilot project looks at 3 months at a time.
'''
# Open files from previous analyses
BiggestMovers12 = pd.read_excel(dataProcessed + 'BiggestMovers2019-12.xlsx')
BiggestMovers10 = pd.read_excel(dataProcessed + 'BiggestMovers2019-10.xlsx')
BiggestMovers11 = pd.read_excel(dataProcessed + 'BiggestMovers2019-11.xlsx')
BiggestMovers10.columns
'''
'PreferredTerm', 'SemanticType', 'SemanticGroup', 'TotalSearchFreq',
'PercentShare', 'Month'
'''
# Update col names
BiggestMovers10.rename(columns={'TotalSearchFreq': 'TotFreq201910', 'PercentShare': 'PerShare201910'}, inplace=True)
BiggestMovers11.rename(columns={'TotalSearchFreq': 'TotFreq201911', 'PercentShare': 'PerShare201911'}, inplace=True)
BiggestMovers12.rename(columns={'TotalSearchFreq': 'TotFreq201912', 'PercentShare': 'PerShare201912'}, inplace=True)
# Drop Month
BiggestMovers10.drop(['Month'], axis=1, inplace=True)
BiggestMovers11.drop(['Month'], axis=1, inplace=True)
BiggestMovers12.drop(['Month'], axis=1, inplace=True)
# Join on PreferredTerm
bgtemp = pd.merge(BiggestMovers10, BiggestMovers11, how='outer', on=['PreferredTerm', 'SemanticType', 'SemanticGroup'])
import glob
import os
import datetime
from collections import OrderedDict
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
import scipy.stats as stats
import scipy.sparse as sparse
from sparse_dataframe import SparseDataFrame
def combine_sdf_files(run_folder, folders, verbose=False, **kwargs):
"""function for concatenating SparseDataFrames together"""
combined = SparseDataFrame()
combined.rows = []
columns = set()
for folder in folders:
filename = os.path.join(run_folder, folder, f'{folder}.mus.cell-gene.npz')
if verbose:
print(f'Reading {filename} ...')
sdf = SparseDataFrame(filename)
columns.add(tuple(sdf.columns))
combined.rows.extend(sdf.rows)
if combined.matrix is None:
combined.matrix = sdf.matrix
else:
combined.matrix = sparse.vstack((combined.matrix, sdf.matrix),
format='csr')
assert len(columns) == 1
combined.columns = columns.pop()
return combined
def combine_csv_files(folder, globber, verbose=False, **kwargs):
"""generic function for concatentating a bunch of csv files into a single
pandas Dataframe"""
dfs = []
for filename in glob.iglob(os.path.join(folder, globber)):
if verbose:
print(f'Reading {filename} ...')
df = pd.read_csv(filename, **kwargs)
dfs.append(df)
combined = pd.concat(dfs)
return combined
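# A minimal usage sketch (hypothetical file names and columns): write two small CSV
# files to a temporary folder and combine them with a glob pattern.
def _demo_combine_csv_files():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        pd.DataFrame({'cell': ['A1'], 'count': [10]}).to_csv(os.path.join(tmp, 'plate1.counts.csv'), index=False)
        pd.DataFrame({'cell': ['B1'], 'count': [20]}).to_csv(os.path.join(tmp, 'plate2.counts.csv'), index=False)
        print(combine_csv_files(tmp, '*.counts.csv', verbose=True))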
def maybe_to_numeric(series):
try:
return pd.to_numeric(series)
except ValueError:
return series
def clean_mapping_stats(mapping_stats_original, convert_to_percentage=None):
"""Remove whitespace from all values and convert to numbers"""
if convert_to_percentage is None:
convert_to_percentage = set()
mapping_stats_original = mapping_stats_original.applymap(
lambda x: (x.replace(',', '').strip().strip('%')
if isinstance(x, str) else x))
numeric = mapping_stats_original.apply(maybe_to_numeric)
numeric.columns = numeric.columns.map(str.strip)
# for 10X mapping stats
numeric.columns = numeric.columns.map(
lambda x: ('Percent {}'.format(x.replace('Fraction ', ''))
if x in convert_to_percentage else x)
)
return numeric
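# A minimal usage sketch (made-up values): clean_mapping_stats() strips commas,
# whitespace and '%' signs, converts columns to numbers, and renames columns listed in
# convert_to_percentage from 'Fraction ...' to 'Percent ...'.
def _demo_clean_mapping_stats():
    raw = pd.DataFrame({'Number of input reads ': ['1,234 ', '5,678 '],
                        'Fraction Reads in Cells': ['85.0%', '90.5%']})
    print(clean_mapping_stats(raw, convert_to_percentage={'Fraction Reads in Cells'}))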
def diff_exp(matrix, group1, group2, index):
"""Computes differential expression between group 1 and group 2
for each column in the dataframe counts.
Returns a dataframe of Z-scores and p-values."""
g1 = matrix[group1, :]
g2 = matrix[group2, :]
g1mu = g1.mean(0)
g2mu = g2.mean(0)
mean_diff = np.asarray(g1mu - g2mu).flatten()
# E[X^2] - (E[X])^2
pooled_sd = np.sqrt(
((g1.power(2)).mean(0) - np.power(g1mu, 2)) / len(group1)
+ ((g2.power(2)).mean(0) - np.power(g2mu, 2)) / len(group2))
pooled_sd = np.asarray(pooled_sd).flatten()
z_scores = np.zeros_like(pooled_sd)
nz = pooled_sd > 0
z_scores[nz] = np.nan_to_num(mean_diff[nz] / pooled_sd[nz])
# t-test
p_vals = (1 - stats.norm.cdf(np.abs(z_scores))) * 2
df = pd.DataFrame(OrderedDict([('z', z_scores), ('p', p_vals)]),
index=index)
return df
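# A minimal usage sketch (random counts, placeholder gene names): compare two made-up
# groups of three cells each on a small sparse counts matrix.
def _demo_diff_exp():
    rng = np.random.RandomState(0)
    counts = sparse.csr_matrix(rng.poisson(5, size=(6, 4)).astype(float))
    print(diff_exp(counts, group1=[0, 1, 2], group2=[3, 4, 5],
                   index=['gene_a', 'gene_b', 'gene_c', 'gene_d']))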
class Plates(object):
# Names of commonly accessed columns
MEAN_READS_PER_CELL = 'Mean reads per well'
MEDIAN_GENES_PER_CELL = 'Median genes per well'
PERCENT_ERCC = 'Percent ERCC'
PERCENT_MAPPED_READS = 'Percent mapped to genome'
# maybe we should change this to the right thing
SAMPLE_MAPPING = 'WELL_MAPPING'
def __init__(self, data_folder, metadata, genes_to_drop='Rn45s',
verbose=False, nrows=None):
plates_folder = os.path.join(data_folder, 'plates')
counts = combine_csv_files(
plates_folder, '*.htseq-count-by-cell.csv',
index_col=[0, 1, 2, 3], verbose=verbose, nrows=nrows)
mapping_stats = combine_csv_files(
plates_folder, '*.log-by-cell.csv',
index_col=[0, 1, 2, 3], verbose=verbose)
self.genes, self.cell_metadata, self.mapping_stats = \
self.clean_and_reformat(counts, mapping_stats)
self.plate_summaries = self.calculate_plate_summaries()
original_metadata = pd.read_csv(metadata, index_col=0)
self.plate_metadata = self.clean_plate_metadata(original_metadata)
self.plate_metadata = self.plate_metadata.loc[
self.plate_summaries.index]
if not os.path.exists(os.path.join(data_folder, 'coords')):
os.mkdir(os.path.join(data_folder, 'coords'))
self.bulk_smushed_cache_file = os.path.join(data_folder, 'coords',
'bulk_smushed.csv')
self.cell_smushed_cache_file = os.path.join(data_folder, 'coords',
'cell_smushed.pickle')
self.bulk_smushed = self.compute_bulk_smushing()
self.cell_smushed = self.compute_cell_smushing()
self.gene_names = sorted(self.genes.columns)
self.plate_metadata_features = sorted(self.plate_metadata.columns)
# Remove pesky genes
self.genes = self.genes.drop(genes_to_drop, axis=1)
# Get a counts per million rescaling of the genes
self.counts_per_million = self.genes.divide(self.genes.sum(axis=1),
axis=0) * 1e6
self.top_genes = self.compute_top_genes_per_cell()
self.data = {'genes': self.genes,
'mapping_stats': self.mapping_stats,
'cell_metadata': self.cell_metadata,
'plate_metadata': self.plate_metadata,
'plate_summaries': self.plate_summaries}
def __repr__(self):
n_plates = self.plate_summaries.shape[0]
n_barcodes = self.genes.shape[0]
s = f'This is an object holding data for {n_plates} plates and ' \
f'{n_barcodes} barcodes.\nHere are the accessible dataframes:\n'
for name, df in self.data.items():
s += f'\t"{name}" table dimensions: ' + str(df.shape) + '\n'
return s
@staticmethod
def clean_and_reformat(counts, mapping_stats):
"""Move metadata information into separate dataframe and simplify ids
Parameters
----------
counts : pandas.DataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell, but also has extra columns of ERCC mapping and
htseq-count output that we want to remove
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by STAR, but everything is a string instead of
numbers which makes us sad
Returns
-------
genes : pandas.DataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell
cell_metadata : pandas.DataFrame
A (samples, sample_features) dataframe of number of detected genes,
total reads, ercc counts, and "WELL_MAPPING" (really,
plate mapping)
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by STAR, with numbers properly formatted
"""
mapping_stats = clean_mapping_stats(mapping_stats)
cell_metadata = counts.index.to_frame()
sample_ids = cell_metadata.index.droplevel([1, 2, 3])
cell_metadata.index = sample_ids
mapping_stats.index = sample_ids
counts.index = sample_ids
# Extract htseq-count outputs and save as separate files
cols = [x for x in counts if x.startswith('__')]
count_stats = counts[cols]
count_stats.columns = [x.strip('_') for x in count_stats]
# Separate spike-ins (ERCCs) and genes
ercc_names = [col for col in counts.columns[3:] if 'ERCC-' in col]
gene_names = [col for col in counts.columns[3:] if
'ERCC-' not in col and col[0] != '_']
cell_metadata['total_reads'] = counts.sum(axis=1)
# Separate counts of everything from genes-only
genes = counts[gene_names]
# Add mapping and ERCC counts to cell metadata
cell_metadata['n_genes'] = (genes > 0).sum(axis=1)
cell_metadata['mapped_reads'] = genes.sum(axis=1)
cell_metadata['ercc'] = counts[ercc_names].sum(axis=1)
cell_metadata = pd.concat([cell_metadata, count_stats], axis=1)
# Remove not useful columns
cell_metadata.drop(['too_low_aQual', 'not_aligned'], inplace=True,
axis=1)
return genes, cell_metadata, mapping_stats
def calculate_plate_summaries(self):
"""Get mean reads, percent mapping, etc summaries for each plate"""
well_map = self.cell_metadata.groupby(Plates.SAMPLE_MAPPING)
# these stats are from STAR mapping
star_cols = ['Number of input reads', 'Uniquely mapped reads number']
star_stats = self.mapping_stats[star_cols].groupby(
self.cell_metadata[Plates.SAMPLE_MAPPING]).sum()
total_reads = star_stats['Number of input reads']
unique_reads = star_stats['Uniquely mapped reads number']
percent_ercc = well_map.sum()['ercc'].divide(total_reads, axis=0)
percent_mapped_reads = unique_reads / total_reads - percent_ercc
plate_summaries = pd.DataFrame(OrderedDict([
(Plates.MEAN_READS_PER_CELL, total_reads / well_map.size()),
(Plates.MEDIAN_GENES_PER_CELL, well_map.median()['n_genes']),
('Percent not uniquely aligned', 100 * well_map.sum()['alignment_not_unique'].divide(total_reads, axis=0)),
(Plates.PERCENT_MAPPED_READS, 100 * percent_mapped_reads),
('Percent no feature', 100 * well_map.sum()['no_feature'].divide(total_reads, axis=0)),
('Percent Rn45s', 100 * self.genes['Rn45s'].groupby(
self.cell_metadata[Plates.SAMPLE_MAPPING]).sum() / total_reads),
(Plates.PERCENT_ERCC, 100 * percent_ercc),
('n_wells', well_map.size())
]))
return plate_summaries
@staticmethod
def clean_plate_metadata(plate_metadata):
# Remove whitespace from "tissue" column
plate_metadata.tissue = plate_metadata.tissue.map(
lambda x: x.strip() if isinstance(x, str) else x)
# Add a column with both tissue and subtissue
cleaned_subtissue = plate_metadata['subtissue'].map(
lambda x: ': ' + x.strip() if isinstance(x, str) else '')
plate_metadata['tissue_subtissue'] = plate_metadata['tissue'] \
+ cleaned_subtissue
# Hard-coded column name of 21_55_F is actually the sample id column
plate_metadata = plate_metadata.rename(
columns={'mouse.id': 'Sample ID'})
plate_metadata['Age (months)'] = plate_metadata['Sample ID'].map(
lambda x: x.split('_')[0] if isinstance(x, str) else '')
def parse_date(x):
if isinstance(x, str):
x = x.strip()
if not x:
return np.nan
if x.endswith('/2017'):
return datetime.datetime.strptime(x, '%m/%d/%Y')
elif x.endswith('/17'):
return datetime.datetime.strptime(x, '%m/%d/%y')
else:
return datetime.datetime.strptime(x, '%y%m%d')
elif isinstance(x, float):
return datetime.datetime.strptime(str(int(x)), '%y%m%d')
else:
raise TypeError
for col in plate_metadata.columns:
if 'date' in col.lower():
plate_metadata[col] = plate_metadata[col].map(
parse_date,
na_action='ignore'
)
# Use only the metadata for the plates that have been sequenced
plate_metadata = plate_metadata.dropna(how='all', axis=1)
return plate_metadata
def compute_bulk_smushing(self):
"""Get average signal from each plate ('bulk') and find 2d embedding"""
grouped = self.genes.groupby(self.cell_metadata[self.SAMPLE_MAPPING])
if os.path.exists(self.bulk_smushed_cache_file):
smushed = pd.read_csv(self.bulk_smushed_cache_file, names=[0, 1],
header=0, index_col=0)
# if the set of plates hasn't changed, return the cached version
if set(grouped.groups) == set(smushed.index):
return smushed
# if the cache was missing or invalid, compute a new projection
medians = grouped.median()
smusher = TSNE(random_state=0, perplexity=10, metric='cosine')
smushed = pd.DataFrame(smusher.fit_transform(medians),
index=medians.index)
smushed.to_csv(self.bulk_smushed_cache_file)
return smushed
def compute_cell_smushing(self):
"""Within each plate, find a 2d embedding of all cells"""
grouped = self.genes.groupby(self.cell_metadata[self.SAMPLE_MAPPING])
if os.path.exists(self.cell_smushed_cache_file):
smusheds = pd.read_pickle(self.cell_smushed_cache_file)
# if nothing is missing, return the cached version
if not set(grouped.groups) - set(smusheds):
return smusheds
else:
smusheds = {}
for plate_name, genes_subset in grouped:
if plate_name not in smusheds:
cell_smusher = TSNE(metric='cosine', random_state=0)
cell_smushed = pd.DataFrame(
cell_smusher.fit_transform(genes_subset),
index=genes_subset.index)
smusheds[plate_name] = cell_smushed
pd.to_pickle(smusheds, self.cell_smushed_cache_file)
return smusheds
def compute_top_genes_per_cell(self):
"""Get the most highly expressed genes in every cell
Returns
-------
top_genes : pandas.Series
A mapping of the cell barcode to a ranked list of the top 10 genes,
where the first item is the most highly expressed (e.g. Rn45s)
"""
ranks = self.genes.rank(axis=1, ascending=False)
in_top10 = ranks[ranks <= 10]
top_genes = in_top10.apply(
lambda x: x.sort_values().dropna().index.tolist(), axis=1)
return top_genes
class TenX_Runs(Plates):
# Names of commonly accessed columns
MEAN_READS_PER_CELL = 'Mean Reads per Cell'
MEDIAN_GENES_PER_CELL = 'Median Genes per Cell'
PERCENT_MAPPED_READS = 'Percent Reads Mapped Confidently to Transcriptome'
SAMPLE_MAPPING = 'CHANNEL_MAPPING'
COLUMNS_TO_CONVERT = {'Valid Barcodes',
'Reads Mapped Confidently to Transcriptome',
'Reads Mapped Confidently to Exonic Regions',
'Reads Mapped Confidently to Intronic Regions',
'Reads Mapped Confidently to Intergenic Regions',
'Reads Mapped Antisense to Gene',
'Sequencing Saturation',
'Q30 Bases in Barcode', 'Q30 Bases in RNA Read',
'Q30 Bases in Sample Index', 'Q30 Bases in UMI',
'Fraction Reads in Cells'}
def __init__(self, data_folder, genes_to_drop='Rn45s',
verbose=False, nrows=None, tissue=None,
channels_to_use=None, tissue_folder='tissues'):
run_folder = os.path.join(data_folder, '10x_data')
self.plate_metadata = combine_csv_files(run_folder,
'MACA_10X_P*.csv',
index_col=0)
if tissue is not None:
tissues = tissue.split(',')
folders = self.plate_metadata.index[self.plate_metadata['Tissue'].isin(tissues)]
else:
folders = self.plate_metadata.index
folders = [f for f in folders if os.path.exists(os.path.join(run_folder, f))]
if channels_to_use is not None:
folders = [f for f in folders if f in channels_to_use]
counts = combine_sdf_files(run_folder, folders,
verbose=verbose)
mapping_stats = self.combine_metrics_files(
run_folder, folders)
self.genes, self.cell_metadata, self.mapping_stats = \
self.clean_and_reformat(counts, mapping_stats)
self.plate_summaries = self.calculate_plate_summaries()
self.plate_metadata = self.plate_metadata.loc[
self.plate_summaries.index]
self.cell_metadata = self.cell_metadata.join(self.plate_metadata,
on=self.SAMPLE_MAPPING)
smushed_folder = os.path.join(run_folder, tissue_folder)
if not os.path.exists(smushed_folder):
os.mkdir(smushed_folder)
self.cell_smushed = self.read_tissue_smushed(smushed_folder, verbose,
tissue)
self.gene_names = sorted(self.genes.columns)
self.plate_metadata_features = sorted(self.plate_metadata.columns)
# Remove pesky genes
self.genes = self.genes.drop(genes_to_drop)
# Get a counts per million rescaling of the genes
# self.counts_per_million = self.genes.divide(self.genes.sum(axis=1),
# axis=0) * 1e6
# self.top_genes = self.compute_top_genes_per_cell()
self.data = {'genes': self.genes,
'mapping_stats': self.mapping_stats,
'cell_metadata': self.cell_metadata,
'plate_metadata': self.plate_metadata,
'plate_summaries': self.plate_summaries}
def __repr__(self):
n_channels = self.plate_summaries.shape[0]
n_barcodes = len(self.genes.rows)
s = f'This is an object holding data for {n_channels} 10X channels and ' \
f'{n_barcodes} barcodes.\nHere are the accessible dataframes:\n'
for name, df in self.data.items():
s += f'\t"{name}" table dimensions: ' + str(df.shape) + '\n'
return s
@staticmethod
def combine_cell_files(folder, globber, verbose=False):
dfs = []
for filename in glob.iglob(os.path.join(folder, globber)):
if verbose:
print(f'Reading {filename} ...')
channel = os.path.basename(os.path.dirname(filename))
df = pd.read_csv(filename, index_col=0)
df.index = pd.MultiIndex.from_product(([channel], df.index),
names=['channel', 'cell_id'])
dfs.append(df)
combined = pd.concat(dfs)
return combined
@staticmethod
def combine_metrics_files(run_folder, folders):
dfs = []
for folder in folders:
filename = os.path.join(run_folder, folder, 'metrics_summary.csv')
p_name = os.path.basename(os.path.dirname(filename))
            df = pd.read_csv(filename)
from .conftest import base_config
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import openamundsen as oa
import openamundsen.errors as errors
import pandas as pd
import pytest
import xarray as xr
@pytest.mark.parametrize('fmt', ['netcdf', 'csv', 'memory'])
def test_formats(fmt, tmp_path):
config = base_config()
config.end_date = '2020-01-16'
config.results_dir = tmp_path
config.output_data.timeseries.format = fmt
config.output_data.timeseries.variables = [{'var': 'snow.num_layers'}]
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
point_ids = ['bellavista', 'latschbloder', 'proviantdepot']
if fmt in ('netcdf', 'memory'):
if fmt == 'netcdf':
ds = xr.open_dataset(tmp_path / 'output_timeseries.nc')
elif fmt == 'memory':
ds = model.point_output.data
assert ds.time.to_index().equals(model.dates)
assert_array_equal(ds.point, point_ids)
assert_array_equal(
list(ds.coords.keys()),
['time', 'point', 'lon', 'lat', 'alt', 'x', 'y', 'soil_layer', 'snow_layer'],
)
assert ds.temp.dims == ('time', 'point')
assert ds.snow_thickness.dims == ('time', 'snow_layer', 'point')
assert ds.soil_temp.dims == ('time', 'soil_layer', 'point')
assert ds.temp.dtype == np.float32
assert np.issubdtype(ds.num_layers.dtype, np.integer)
assert np.all(ds.temp > 250.)
elif fmt == 'csv':
for point_id in point_ids:
assert (tmp_path / f'point_{point_id}.csv').exists()
df = pd.read_csv(tmp_path / 'point_bellavista.csv', index_col='time', parse_dates=True)
assert df.index.equals(model.dates)
assert df.temp.dtype == np.float64
assert np.issubdtype(df.num_layers.dtype, np.integer)
assert 'snow_thickness0' in df
assert np.all(df.temp > 250.)
def test_values():
config = base_config()
config.end_date = '2020-01-15'
model = oa.OpenAmundsen(config)
model.initialize()
point = 'proviantdepot'
row = int(model.meteo.sel(station=point).row)
col = int(model.meteo.sel(station=point).col)
    data_temp = pd.Series(index=model.dates, dtype=float)
# -*- coding: utf-8 -*-
"""
Created on Sep
@author: CocoLiao
Topic: NEC_system_PathDist_module
Input ex:
Run_TotalSites('D:\\nec-backend\\dist\\docs\\mrData.xlsx',
'D:\\nec-backend\\dist\\docs\\workerData.xlsx',
'D:\\nec-backend\\dist\\docs\\officeAddress.xlsx',
'D:\\nec-backend\\dist\\docs\\taxiCost.xlsx',
30, 800.0, 6.0, 4.0, 2.42)
"""
# packages import
import pandas as pd
import numpy as np
import googlemaps
from gurobipy import *
import time
def Run_TotalSites(Service_FN, Worker_FN, Office_FN, TXcost_FN, workTime_buffer, PC_basicMilleage, PC_belowCost, PC_upperCost, CC_avgCost):
'''
<input>
Service_FN: string [file path] # NEC_MRDATA_original
Worker_FN: string [file path] # NEC_workerDATA
Office_FN: string [file path] # TW_sites_address
TXcost_FN: string [file path] # TW_TXcars_cost
workTime_buffer: int # works_between_buffer_mins
PC_basicMilleage: float # private_car_monthly_basic_Mileage_km
PC_belowCost: float # private_car_below_basicM_fuel_cost_$/km
    PC_upperCost: float            # private_car_above_basicM_fuel_cost_$/km
CC_avgCost: float # company_car_fuel_cost_$/km
<output>
PriceSens_final_df: dataframe (table)
'''
    tStart = time.time()  # start timing
###### MODULE ONE: PathDist.py################################################################
def PathDist(Service_File, Worker_File, Office_File, office_EGnm):
'''
<input>
Service_File: string [file path] # NEC_MRDATA_original
Worker_File: string [file path] # NEC_workerDATA
Office_File: string [file path] # NEC_TW_sites_address
        office_EGnm: string
<output>
(site)_PathDist_detail: dataframe (table), original MRDATA resort with path labeled
(site)_PathDist_analy: dataframe (table), each uniquePath info with Out_date, Path_ID/order, MoveDist_GO/BACK/TOL, Begin/End_time columns
'''
# read original MR_DATA files
Service_Data = pd.read_excel(Service_File)
Worker_Data = pd.read_excel(Worker_File)
Office_Data = pd.read_excel(Office_File)
# match serciceDATA and workerDATA
Worker_Data = Worker_Data.drop(['person_nm', 'actgr_nm'], axis = 1)
        Data = pd.merge(Service_Data, Worker_Data, on='case_no')
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.pipelines import TimeSeriesBaselineRegressionPipeline
from evalml.pipelines.time_series_baselines import (
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline
)
@pytest.mark.parametrize('X_none', [True, False])
@pytest.mark.parametrize('gap', [0, 1])
@pytest.mark.parametrize('pipeline_class', [TimeSeriesBaselineRegressionPipeline,
TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline])
@patch("evalml.pipelines.TimeSeriesClassificationPipeline._decode_targets", side_effect=lambda y: y)
def test_time_series_baseline(mock_decode, pipeline_class, gap, X_none, ts_data):
X, y = ts_data
clf = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": 1},
"Time Series Baseline Estimator": {'gap': gap, 'max_delay': 1}})
expected_y = y.shift(1) if gap == 0 else y
expected_y = expected_y.reset_index(drop=True)
if not expected_y.isnull().values.any():
expected_y = expected_y.astype("Int64")
if X_none:
X = None
clf.fit(X, y)
assert_series_equal(expected_y, clf.predict(X, y).to_series())
@pytest.mark.parametrize('X_none', [True, False])
@pytest.mark.parametrize('gap', [0, 1])
@pytest.mark.parametrize('pipeline_class', [TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline])
def test_time_series_baseline_predict_proba(pipeline_class, gap, X_none):
X = pd.DataFrame({"a": [4, 5, 6, 7, 8]})
y = pd.Series([0, 1, 1, 0, 1])
expected_proba = pd.DataFrame({0: pd.Series([1, 0, 0, 1, 0], dtype="float64"),
1: pd.Series([0, 1, 1, 0, 1], dtype="float64")})
if pipeline_class == TimeSeriesBaselineMulticlassPipeline:
        y = pd.Series([0, 1, 2, 2, 1])
#!/usr/bin/env python3
#pylint: disable = C, R
#pylint: disable = E1101 # no-member (generated-members)
#pylint: disable = C0302 # too-many-lines
"""
This code features the article
"Pareto-based evaluation of national responses to COVID-19 pandemic shows
that saving lives and protecting economy are non-trade-off objectives"
by Kochanczyk & Lipniacki (Scientific Reports, 2021).
License: MIT
Last changes: November 09, 2020
"""
# --------------------------------------------------------------------------------------------------
import re
from operator import itemgetter
from multiprocessing import Pool
import pandas as pd
import numpy as np
import scipy.stats
import dill
import gzip
from shared import *
# -- Contents settings -----------------------------------------------------------------------------
#TMP SNAPSHOT_BASE_URL = 'https://raw.githubusercontent.com/' + \
#TMP 'kochanczyk/covid19-pareto/master/data/snapshot-20200706/'
SNAPSHOT_BASE_URL = 'data/snapshot-20201109/' # TMP
OWID_DATA_URL = SNAPSHOT_BASE_URL + 'owid-covid-data.csv.bz2'
OWID_TESTING_DATA_URL = SNAPSHOT_BASE_URL + 'covid-testing-all-observations.csv.bz2'
MOBILITY_DATA_URL = SNAPSHOT_BASE_URL + 'Global_Mobility_Report.csv.bz2'
TRACKING_URL = SNAPSHOT_BASE_URL + 'daily.csv.bz2'
EXCESS_DEATHS_EUROSTAT_URL = SNAPSHOT_BASE_URL + 'demo_r_mwk_ts_1_Data.csv.bz2'
EXCESS_DEATHS_CDC_URL = SNAPSHOT_BASE_URL + 'Excess_Deaths_Associated_with_COVID-19.csv.bz2'
GDP_EUROSTAT_URL = SNAPSHOT_BASE_URL + 'estat_namq_10_gdp--SCA.csv.bz2'
THROWIN_DATES_ = {
'Spain': ['2020-04-19', '2020-05-22', '2020-05-25'],
'France': ['2020-05-07', '2020-05-29', '2020-06-03'],
'United Kingdom': ['2020-05-21'],
'Ireland': ['2020-05-15'],
'Portugal': ['2020-05-03']
}
THROWIN_DATES = {country: list(map(pd.to_datetime, days)) for country, days in THROWIN_DATES_.items()}
# -- Data analysis auxiliary functions -------------------------------------------------------------
def extract_cases_and_deaths(location):
columns = ['date', 'new_cases', 'total_cases', 'new_deaths', 'total_deaths']
if is_USA_state(location):
state_abbrev = STATE_TO_ABBREV[location]
return TRACKING_DATA.loc[state_abbrev]
else:
country = location
country_indices = OWID_DATA['location'] == country
return OWID_DATA[country_indices][columns].set_index('date')
def extract_mobility(location):
if is_USA_state(location):
df = MOBILITY_DATA[ (MOBILITY_DATA['location'] == 'United States') \
& (MOBILITY_DATA['sub_region_1'] == location) \
& MOBILITY_DATA['sub_region_2'].isnull() ].set_index('date')
else:
df = MOBILITY_DATA[ (MOBILITY_DATA['location'] == location) \
& MOBILITY_DATA['sub_region_1'].isnull() ].set_index('date')
if 'metro_area' in df.columns:
df = df[ df['metro_area'].isnull() ]
assert df['sub_region_1'].isnull().all() and df['sub_region_2'].isnull().all()
return df
def smoothed_daily_data(location, fix=True):
daily_ws = [3, 7, 14]
df = extract_cases_and_deaths(location).copy()
if fix:
# general
for col_new in ('new_cases', 'new_deaths'):
df[col_new] = df[col_new].fillna(0)
for col_tot in ('total_cases', 'total_deaths'):
if pd.isna(df.iloc[0][col_tot]):
initial_date = df.index[0]
df.at[initial_date, col_tot] = 0
df[col_tot] = df[col_tot].ffill()
# location-specific
if location in THROWIN_DATES:
for throwin in THROWIN_DATES[location]:
new_cases = df.loc[throwin, 'new_cases']
if new_cases == 0:
pass
elif new_cases < 0:
df.loc[throwin, 'new_cases'] = 0
elif new_cases > 0:
prevv = df.loc[throwin - pd.offsets.Day(1), 'new_cases']
nextt = df.loc[throwin + pd.offsets.Day(1), 'new_cases']
df.loc[throwin, 'new_cases'] = int(round(0.5*(prevv + nextt)))
# WARNING: because of the above, diff(cumulative total) != daily
for k in ('cases', 'deaths'):
for w in daily_ws:
df[f"new_{k}{w}"] = df[f"new_{k}"].rolling(window=w, min_periods=w//2+1, **ROLL_OPTS).mean()
is_w_even = not (w % 2)
has_nan_initially = pd.isnull(df.iloc[0][f"new_{k}{w}"])
if is_w_even and has_nan_initially:
df.at[df.index[0], f"new_{k}{w}"] = 0
for col in ('new_cases', 'total_cases', 'new_deaths', 'total_deaths'):
df[col] = df[col].astype('Int64')
return df
def calc_Rt(theta, TINY=1e-16):
if pd.isnull(theta) or theta < TINY:
return pd.NA
elif theta == 1:
return 1
log2 = np.log(2)
Td = log2/np.log(theta)
m, n, sigma, gamma = 6, 1, 1/5.28, 1/3
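    # Assumed interpretation: a Wallinga-Lipsitch-type relation between the epidemic growth
    # rate (r = ln2/Td, with Td the doubling time) and Rt, for Erlang-distributed latent
    # (m stages, rate sigma) and infectious (n stages, rate gamma) periods.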
Rt = log2/Td*(log2/(Td * m *sigma) + 1)**m / (gamma*(1 - (log2/(Td * n * gamma) + 1)**(-n)))
return Rt
def insert_epidemic_dynamics(df, timespan_days=14, data_smoothing_window=14):
half_timespan_days = timespan_days//2
exponent = (1/(timespan_days - 1))
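    # theta below is the average daily (multiplicative) growth factor of the smoothed
    # series over a centred window spanning timespan_days days.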
for kind in ('cases', 'deaths'):
values = df[f"new_{kind}{data_smoothing_window}"].values
thetas = []
for vi in range(len(values)):
if vi < half_timespan_days or vi + half_timespan_days >= len(values):
thetas += [pd.NA]
else:
bwd, fwd = values[vi - half_timespan_days], values[vi + half_timespan_days]
if bwd > 0 and fwd >= 0:
theta = (fwd/bwd)**exponent
theta = float(theta)
else:
                    theta = pd.NA
thetas += [theta]
df[f"theta_{kind}"] = thetas
df[f"Rt_{kind}"] = df[f"theta_{kind}"].map(calc_Rt)
return df
def average_mobility_reduction(location_or_mo):
if type(location_or_mo) == str:
location = location_or_mo
mo = extract_mobility(location)
else:
mo = location_or_mo
return mo['retail_and_recreation workplaces'.split()].agg(np.mean, axis=1).astype('Float64').to_frame(name='mobility')
def insert_mobility_reduction(df, location, min_sum_weights=0.5):
def has_day_(dd): return dd in avg_mo.index
def is_weekday_(dd): return dd.dayofweek < 5
def is_holiday_(cc, dd): return cc in HOLIDAYS and dd in HOLIDAYS[cc]
def is_valid_day_(cc, dd): return has_day_(dd) and is_weekday_(dd) and not is_holiday_(cc, dd)
mo = extract_mobility(location)
avg_mo = average_mobility_reduction(mo)
df['mobility'] = avg_mo
df['mobility_reduction'] = 0
for day in mo.index:
if day in df.index and is_valid_day_(location, day):
df.at[day,'mobility_reduction'] = avg_mo.loc[day]
for kind in ('cases', 'deaths'):
distrib = {'cases': INFECTION_TO_REMOVAL, 'deaths': INFECTION_TO_DEATH}[kind]
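        # Assumption: INFECTION_TO_REMOVAL / INFECTION_TO_DEATH (imported from shared) are
        # day-indexed delay-distribution weights from infection to case confirmation or death;
        # historical mobility is averaged with these weights over valid (working, non-holiday) days.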
df[f"mobility_historical_{kind}"] = pd.NA # previous values that gave rise to current daily new cases or deaths
for day in mo.index:
if day in df.index:
valid_days_indices = {di for di in range(len(distrib))
if is_valid_day_(location, day - pd.offsets.Day(di))}
weights = [distrib[di]
for di in valid_days_indices]
weighted_ms = [distrib[di] * avg_mo.loc[day - pd.offsets.Day(di)]
for di in valid_days_indices]
sum_weights = np.sum(weights)
df.at[day, f"mobility_historical_{kind}"] = np.sum(weighted_ms)/sum_weights \
if sum_weights >= min_sum_weights else pd.NA
return df
def insert_tests_performed(df, location, interpolate=True, w=7, verbose=False):
if is_USA_state(location):
df[f"new_tests{w}"] = df['new_tests'].rolling(window=w, min_periods=w//2+1, **ROLL_OPTS).mean()
df['tests_per_hit'] = df[f"new_tests{w}"] \
/ df['new_cases'].rolling(window=w, min_periods=w//2+1, **ROLL_OPTS).mean()
return df
else:
df_test = None
colnames = ['date', 'Cumulative total']
endings = ('tests performed', 'tests performed (CDC) (incl. non-PCR)', 'samples tested',
'samples analysed', 'units unclear', 'units unclear (incl. non-PCR)',
'people tested', 'people tested (incl. non-PCR)', 'cases tested')
entities = set(OWID_TESTING_DATA['Entity'])
location_entities = {}
for cc, tt in [(e.split(' - ')[0], e.split(' - ')[1]) for e in entities]:
assert tt in endings
if cc in location_entities:
location_entities[cc] = location_entities[cc] + [tt]
else:
location_entities[cc] = [tt]
sel_endings = ['people tested (incl. non-PCR)'] if location == 'Japan' else endings
for ending in sel_endings:
ent = f"{location.replace('Czechia', 'Czech Republic')} - {ending}"
if ent in entities:
ent_indices = OWID_TESTING_DATA['Entity'] == ent
if location == 'France':
df_fr = OWID_TESTING_DATA[ent_indices][colnames + ['Daily change in cumulative total']]
df_fr.at[df_fr.index[0], 'Cumulative total'] = df_fr.iloc[0]['Daily change in cumulative total']
for i in range(len(df_fr) - 1):
prev_cumulative = df_fr.iloc[i]['Cumulative total']
change_in_cumulative = df_fr.iloc[i + 1]['Daily change in cumulative total']
df_fr.at[df_fr.index[i + 1], 'Cumulative total'] = prev_cumulative + change_in_cumulative
df_pre = df_fr[colnames].set_index('date') \
.rename(columns={'Cumulative total': ending})
else:
df_pre = OWID_TESTING_DATA[ent_indices][colnames].set_index('date') \
.rename(columns={'Cumulative total': ending})
if not df_pre[ending].isnull().all():
df_test = df_pre if df_test is None else df_test.join(df_pre, how='outer')
if df_test is None:
print(f"{location}: missing data on testing")
df['total_tests'] = np.nan
df['tests_per_hit'] = np.nan
return df
else:
if verbose:
print(location, '::',
df_test.index[ 0].strftime('%Y: %B, %d'), '--',
df_test.index[-1].strftime('%B, %d'), '::', ', '.join(list(df_test.columns)))
if len(df_test.columns) == 1:
df_test.rename(columns=lambda colnm: re.sub(r'^.*$', 'total_tests', colnm), inplace=True)
else:
df_test['total_tests'] = np.nan
df_test['test_type'] = '?'
for ending in endings:
if ending not in df_test.columns: continue
for day in df_test.index:
if np.isnan(df_test.loc[day]['total_tests']) and not np.isnan(df_test.loc[day][ending]):
df_test.at[day, 'total_tests'] = df_test.loc[day][ending]
df_test.at[day, 'test_type'] = ending
if verbose:
for ending in endings:
if ending not in df_test.columns: continue
df_sub = df_test[ df_test['test_type'] == ending ][ending].dropna()
if len(df_sub):
print(' '*len(location), '::',
df_sub.index[ 0].strftime('%Y: %B, %d'), '--',
df_sub.index[-1].strftime('%B, %d'), '::', ending)
if interpolate:
df_test['total_tests'] = df_test['total_tests'].interpolate(limit_area='inside',
limit_direction='both')
else:
df_test['total_tests'] = df_test['total_tests'].astype('Int64')
df_test['new_tests'] = df_test['total_tests'].diff()
df_test[f"new_tests{w}"] = df_test['new_tests'].rolling(window=w, min_periods=w//2+1, **ROLL_OPTS).mean()
df_test['tests_per_hit'] = df_test[f"new_tests{w}"] \
/ df['new_cases'].rolling(window=w, min_periods=w//2+1, **ROLL_OPTS).mean()
return df.join(df_test)
def process_location(location, kv=True):
df = smoothed_daily_data(location)
df = insert_epidemic_dynamics(df)
df = insert_mobility_reduction(df, location)
df = insert_tests_performed(df, location)
return (location, df) if kv else df
def check_gaps(location, traj):
missing = []
dt = traj['new_deaths'].index[-1] - traj['new_deaths'].index[0]
if dt.days != len(traj['new_deaths'].index) - 1:
for i in range(len(traj.index) - 1):
since, until = traj.index[i], traj.index[i + 1]
inter_days = (until - since).days
if inter_days > 1:
gap = inter_days - 1
if gap == 1:
timespan_s = f"{(since + pd.offsets.Day(1)).strftime('%B %d')}"
else:
timespan_s = f"{(since + pd.offsets.Day(1)).strftime('%B %d')}--" \
f"{(until - pd.offsets.Day(1)).strftime('%B %d')}"
for i in range(gap):
day = since + pd.offsets.Day(1 + i)
if day < FINAL_DAY:
                        missing += [day]
print(f"{location}: missing {gap} day{'s' if gap > 1 else ''}: {timespan_s}")
return missing
def check_mobility(location, trajectory):
missing = []
nan_blocks = []
in_nan_block = False
for index, value in trajectory[['mobility']].iterrows():
if pd.isnull(float(value)):
if index < FINAL_DAY:
missing += [index]
if not in_nan_block:
in_nan_block = True
nan_blocks.append([index])
else:
nan_blocks[-1].append(index)
else:
if in_nan_block:
in_nan_block = False
for nan_block in nan_blocks:
since, until = nan_block[0], nan_block[-1]
if since != trajectory.index[0] and until != trajectory.index[-1]:
timespan_s = f"{since.strftime('%B %d')}--" \
f"{until.strftime('%B %d')}"
print(f"{location}: missing mobility: {timespan_s}")
return missing
# --------------------------------------------------------------------------------------------------
# https://ec.europa.eu/eurostat/statistics-explained/images/d/da/Weekly_deaths_15_10_2020-update.xlsx
# which are source data for:
# https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Weekly_death_statistics
def read_excess_deaths_eurostat():
d = pd.read_csv(EXCESS_DEATHS_EUROSTAT_URL) \
.drop(columns=['UNIT', 'Flag and Footnotes', 'SEX'])
d.loc[ d['Value']==':', 'Value'] = pd.NA
d['Value'] = d['Value'].map(lambda v: int(v.replace(',', '')) if type(v)==str else v)
d = d[ d['GEO'] != 'Georgia' ]
weeks = [f"W{i:0>2d}" for i in range(1, 13+13+1)]
years = list(map(str, range(2016, 2020)))
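    # Baseline is the 2016-2019 average of weekly deaths; 2020 excess deaths are the
    # difference between 2020 weekly deaths and this baseline, summed over weeks W01-W26.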
excess_deaths = {}
for loc, dd in d.groupby('GEO'):
dd.set_index('TIME', inplace=True)
ddd = {wk: dd.loc[ [f"{yr}{wk}" for yr in years] ]['Value'].mean()
for wk in weeks}
ddd = pd.DataFrame.from_dict(ddd, orient='index', columns=['Average deaths'])
        ddd['2020 deaths'] = [dd.loc[[f"2020{wk}"]]['Value'].mean()
                              for wk in weeks]
ddd['2020 excess deaths'] = ddd['2020 deaths'] - ddd['Average deaths']
loc = loc.replace('Germany (until 1990 former territory of the FRG)', 'Germany')
excess_deaths[loc] = (ddd['2020 deaths'] - ddd['Average deaths']).sum()
return excess_deaths
def read_excess_deaths_cdc():
d = | pd.read_csv(EXCESS_DEATHS_CDC_URL, parse_dates=['Week Ending Date']) | pandas.read_csv |
from ..loaders import load_data
from ..utils import load_json_config
from deoxys_image.patch_sliding import get_patch_indice
from deoxys_vis import read_csv
import numpy as np
import h5py
import pandas as pd
import os
from time import time
import shutil
import matplotlib.pyplot as plt
import warnings
class H5Metric:
def __init__(self, ref_file, save_file, metric_name='score',
predicted_dataset='predicted',
target_dataset='y', batch_size=4,
map_file=None, map_column=None):
self.metric_name = metric_name
self.ref_file = ref_file
self.predicted = predicted_dataset
self.target = target_dataset
with h5py.File(ref_file, 'r') as f:
keys = list(f.keys())
if target_dataset not in keys:
self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
self.target = [f'{key}/{target_dataset}' for key in keys]
self.batch_size = batch_size
self.res_file = save_file
self.map_file = map_file
self.map_column = map_column
def get_img_batch(self):
self.scores = []
if self.map_file is None:
if type(self.predicted) == str:
with h5py.File(self.ref_file, 'r') as f:
size = f[self.target].shape[0]
for i in range(0, size, self.batch_size):
with h5py.File(self.ref_file, 'r') as f:
predicted = f[self.predicted][i:i+self.batch_size]
targets = f[self.target][i:i+self.batch_size]
yield targets, predicted
else:
for pred, target in zip(self.predicted, self.target):
with h5py.File(self.ref_file, 'r') as f:
size = f[target].shape[0]
for i in range(0, size, self.batch_size):
with h5py.File(self.ref_file, 'r') as f:
predicted = f[pred][i:i+self.batch_size]
targets = f[target][i:i+self.batch_size]
yield targets, predicted
else: # handle 3d with different sizes
map_df = pd.read_csv(self.map_file)
map_data = map_df[self.map_column].values
for idx in map_data:
with h5py.File(self.ref_file, 'r') as f:
predicted = f[self.predicted][str(idx)][:]
targets = f[self.target][str(idx)][:]
yield np.expand_dims(targets, axis=0), np.expand_dims(
predicted, axis=0)
def update_score(self, scores):
self.scores.extend(scores)
def save_score(self):
if os.path.isfile(self.res_file):
df = pd.read_csv(self.res_file)
df[f'{self.metric_name}'] = self.scores
else:
df = pd.DataFrame(self.scores, columns=[f'{self.metric_name}'])
df.to_csv(self.res_file, index=False)
def post_process(self, **kwargs):
for targets, prediction in self.get_img_batch():
scores = self.calculate_metrics(
targets, prediction, **kwargs)
self.update_score(scores)
self.save_score()
    def calculate_metrics(self, targets, predictions, **kwargs):
raise NotImplementedError
class H5CalculateFScore(H5Metric):
def __init__(self, ref_file, save_file, metric_name='f1_score',
predicted_dataset='predicted',
target_dataset='y', batch_size=4, beta=1, threshold=None,
map_file=None, map_column=None):
super().__init__(ref_file, save_file, metric_name,
predicted_dataset,
target_dataset, batch_size,
map_file, map_column)
self.threshold = 0.5 if threshold is None else threshold
self.beta = beta
def calculate_metrics(self, y_true, y_pred, **kwargs):
assert len(y_true) == len(y_pred), "Shape not match"
eps = 1e-8
size = len(y_true.shape)
reduce_ax = tuple(range(1, size))
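        # Reduce over every axis except the first (batch) axis, so one F-beta score
        # is returned per sample in the batch.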
y_pred = (y_pred > self.threshold).astype(y_pred.dtype)
if y_pred.ndim - y_true.ndim == 1 and y_pred.shape[-1] == 1:
y_pred = y_pred[..., 0]
true_positive = np.sum(y_pred * y_true, axis=reduce_ax)
target_positive = np.sum(y_true, axis=reduce_ax)
predicted_positive = np.sum(y_pred, axis=reduce_ax)
fb_numerator = (1 + self.beta ** 2) * true_positive + eps
fb_denominator = (
(self.beta ** 2) * target_positive + predicted_positive + eps
)
return fb_numerator / fb_denominator
class H5MetaDataMapping:
def __init__(self, ref_file, save_file, folds, fold_prefix='fold',
dataset_names=None):
self.ref_file = ref_file
self.save_file = save_file
if fold_prefix:
self.folds = ['{}_{}'.format(
fold_prefix, fold) for fold in folds]
else:
self.folds = folds
self.dataset_names = dataset_names
def post_process(self, *args, **kwargs):
data = {dataset_name: [] for dataset_name in self.dataset_names}
for fold in self.folds:
with h5py.File(self.ref_file, 'r') as f:
for dataset_name in self.dataset_names:
meta_data = f[fold][dataset_name][:]
dtype = meta_data.dtype.name
if 'int' not in dtype and 'float' not in dtype:
meta_data = meta_data.astype(str)
data[dataset_name].extend(meta_data)
df = pd.DataFrame(data)
df.to_csv(self.save_file, index=False)
class H5Merge2dSlice:
def __init__(self, ref_file, map_file, map_column, merge_file, save_file,
predicted_dataset='predicted', target_dataset='y',
input_dataset='x'):
self.ref_file = ref_file
self.map_file = map_file
self.map_column = map_column
self.merge_file = merge_file
self.save_file = save_file
self.predicted = predicted_dataset
self.target = target_dataset
self.inputs = input_dataset
with h5py.File(ref_file, 'r') as f:
keys = list(f.keys())
if input_dataset not in keys:
self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
self.target = [f'{key}/{target_dataset}' for key in keys]
self.inputs = [f'{key}/{input_dataset}' for key in keys]
def post_process(self):
map_df = pd.read_csv(self.map_file)
map_data = map_df[self.map_column].values
unique_val = []
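        # Find the boundaries where the mapping column changes value, so that consecutive
        # 2D slices belonging to the same id can be regrouped into one 3D image.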
first, last = map_data[0], map_data[-1]
tmp = np.concatenate([[first], map_data, [last]])
indice = np.where(tmp[1:] != tmp[:-1])[0]
indice = np.concatenate([[0], indice, [len(map_data)]])
if type(self.inputs) == str:
with h5py.File(self.merge_file, 'w') as mf:
mf.create_group(self.inputs)
mf.create_group(self.target)
mf.create_group(self.predicted)
for i in range(len(indice) - 1):
start = indice[i]
end = indice[i+1]
unique_val.append(map_data[start])
assert map_data[start] == map_data[end-1], "id not match"
curr_name = str(map_data[start])
with h5py.File(self.ref_file, 'r') as f:
img = f[self.inputs][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.inputs].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.target][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.target].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.predicted][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.predicted].create_dataset(
curr_name, data=img, compression="gzip")
else:
inputs = self.inputs[0].split('/')[-1]
target = self.target[0].split('/')[-1]
predicted = self.predicted[0].split('/')[-1]
with h5py.File(self.merge_file, 'w') as mf:
mf.create_group(inputs)
mf.create_group(target)
mf.create_group(predicted)
offset = 0
curr_data_idx = 0
with h5py.File(self.ref_file, 'r') as f:
total = f[self.inputs[curr_data_idx]].shape[0]
for i in range(len(indice) - 1):
if indice[i] - offset >= total:
offset = indice[i]
curr_data_idx += 1
with h5py.File(self.ref_file, 'r') as f:
total = f[self.inputs[curr_data_idx]].shape[0]
map_start, map_end = indice[i], indice[i+1]
start = indice[i] - offset
end = indice[i+1] - offset
unique_val.append(map_data[map_start])
assert map_data[map_start] == map_data[map_end -
1], "id not match"
curr_name = str(map_data[map_start])
with h5py.File(self.ref_file, 'r') as f:
img = f[self.inputs[curr_data_idx]][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[inputs].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.target[curr_data_idx]][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[target].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.predicted[curr_data_idx]][start:end]
with h5py.File(self.merge_file, 'a') as mf:
mf[predicted].create_dataset(
curr_name, data=img, compression="gzip")
df = pd.DataFrame(data=unique_val, columns=[self.map_column])
df.to_csv(self.save_file, index=False)
class H5Transform3d:
def __init__(self, ref_file, map_file, map_column, merge_file,
predicted_dataset='predicted', target_dataset='y',
input_dataset='x'):
self.ref_file = ref_file
self.map_file = map_file
self.map_column = map_column
self.merge_file = merge_file
self.predicted = predicted_dataset
self.target = target_dataset
self.inputs = input_dataset
with h5py.File(ref_file, 'r') as f:
keys = list(f.keys())
if input_dataset not in keys:
self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
self.target = [f'{key}/{target_dataset}' for key in keys]
self.inputs = [f'{key}/{input_dataset}' for key in keys]
def post_process(self):
map_df = pd.read_csv(self.map_file)
map_data = map_df[self.map_column].values
first, last = map_data[0], map_data[-1]
tmp = np.concatenate([[first], map_data, [last]])
indice = np.where(tmp[1:] != tmp[:-1])[0]
indice = np.concatenate([[0], indice, [len(map_data)]])
if type(self.inputs) == str:
with h5py.File(self.merge_file, 'w') as mf:
mf.create_group(self.inputs)
mf.create_group(self.target)
mf.create_group(self.predicted)
for i in range(len(map_data)):
curr_name = str(map_data[i])
with h5py.File(self.ref_file, 'r') as f:
img = f[self.inputs][i]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.inputs].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.target][i]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.target].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.predicted][i]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.predicted].create_dataset(
curr_name, data=img, compression="gzip")
else: # pragma: no cover
inputs = self.inputs[0].split('/')[-1]
target = self.target[0].split('/')[-1]
predicted = self.predicted[0].split('/')[-1]
with h5py.File(self.merge_file, 'w') as mf:
mf.create_group(inputs)
mf.create_group(target)
mf.create_group(predicted)
offset = 0
curr_data_idx = 0
with h5py.File(self.ref_file, 'r') as f:
total = f[self.inputs[curr_data_idx]].shape[0]
for i in range(len(map_data)):
if i - offset >= total:
offset = i
curr_data_idx += 1
with h5py.File(self.ref_file, 'r') as f:
total = f[self.inputs[curr_data_idx]].shape[0]
curr_name = str(map_data[i])
with h5py.File(self.ref_file, 'r') as f:
img = f[self.inputs[curr_data_idx]][i-offset]
with h5py.File(self.merge_file, 'a') as mf:
mf[inputs].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.target[curr_data_idx]][i-offset]
with h5py.File(self.merge_file, 'a') as mf:
mf[target].create_dataset(
curr_name, data=img, compression="gzip")
with h5py.File(self.ref_file, 'r') as f:
img = f[self.predicted[curr_data_idx]][i-offset]
with h5py.File(self.merge_file, 'a') as mf:
mf[predicted].create_dataset(
curr_name, data=img, compression="gzip")
# df = pd.DataFrame(data=unique_val, columns=[self.map_column])
# df.to_csv(self.save_file, index=False)
class H5MergePatches: # pragma: no cover
def __init__(self, ref_file, predicted_file,
map_column, merge_file, save_file,
patch_size, overlap,
folds, fold_prefix='fold',
original_input_dataset='x',
original_target_dataset='y',
predicted_dataset='predicted', target_dataset='y',
input_dataset='x'
):
self.ref_file = ref_file
self.predicted_file = predicted_file
self.map_column = map_column
self.merge_file = merge_file
self.save_file = save_file
self.ref_inputs = original_input_dataset
self.ref_targets = original_target_dataset
self.predicted = predicted_dataset
self.target = target_dataset
self.inputs = input_dataset
if fold_prefix:
self.folds = ['{}_{}'.format(
fold_prefix, fold) for fold in folds]
else:
self.folds = folds
self.patch_size = patch_size
self.overlap = overlap
print('merge images of patch', patch_size)
def _save_inputs_target_to_merge_file(self, fold, meta, index):
with h5py.File(self.ref_file, 'r') as f:
inputs = f[fold][self.ref_inputs][index]
targets = f[fold][self.ref_targets][index]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.inputs].create_dataset(
meta, data=inputs, compression="gzip")
mf[self.target].create_dataset(
meta, data=targets, compression="gzip")
def _merge_patches_to_merge_file(self, meta, start_cursor):
with h5py.File(self.merge_file, 'r') as mf:
shape = mf[self.target][meta].shape[:-1]
# fix patch size
if '__iter__' not in dir(self.patch_size):
self.patch_size = [self.patch_size] * len(shape)
indice = get_patch_indice(shape, self.patch_size, self.overlap)
next_cursor = start_cursor + len(indice)
with h5py.File(self.predicted_file, 'r') as f:
data = f[self.predicted][start_cursor: next_cursor]
predicted = np.zeros(shape)
weight = np.zeros(shape)
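        # Accumulate overlapping patch predictions and count per-voxel coverage in `weight`;
        # dividing by `weight` below averages the predictions in overlapping regions.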
for i in range(len(indice)):
x, y, z = indice[i]
w, h, d = self.patch_size
predicted[x:x+w, y:y+h, z:z+d] = predicted[x:x+w, y:y+h, z:z+d] \
+ data[i][..., 0]
weight[x:x+w, y:y+h, z:z+d] = weight[x:x+w, y:y+h, z:z+d] \
+ np.ones(self.patch_size)
predicted = (predicted/weight)[..., np.newaxis]
with h5py.File(self.merge_file, 'a') as mf:
mf[self.predicted].create_dataset(
meta, data=predicted, compression="gzip")
return next_cursor
def post_process(self):
# create merge file
with h5py.File(self.merge_file, 'w') as mf:
mf.create_group(self.inputs)
mf.create_group(self.target)
mf.create_group(self.predicted)
data = []
start_cursor = 0
for fold in self.folds:
with h5py.File(self.ref_file, 'r') as f:
meta_data = f[fold][self.map_column][:]
data.extend(meta_data)
for index, meta in enumerate(meta_data):
self._save_inputs_target_to_merge_file(
fold, str(meta), index)
start_cursor = self._merge_patches_to_merge_file(
str(meta), start_cursor)
# create map file
df = pd.DataFrame(data, columns=[self.map_column])
df.to_csv(self.save_file, index=False)
class AnalysisPerEpoch: # pragma: no cover
_markers = ['o-', 'v-', '^-', '<-', '>-',
'1-', '2-', 's-', 'p-', 'P-',
'*-', '+-', 'x-', 'D-', 'd-'] * 10 + ['--']
def __init__(self, save_path, log_file_templates, epochs,
map_column='patient idx', monitor='', model_name=''):
self.save_path = save_path
self.log_file_templates = log_file_templates
self.epochs = epochs
self.map_column = map_column
self.monitor = monitor
self.model_name = model_name or save_path.split('/')[-2]
def post_process(self):
patient_dice_per_epoch = []
monitor = self.monitor
epochs = self.epochs
map_column = self.map_column
for epoch in epochs:
# load each log file
data = pd.read_csv(self.log_file_templates.format(epoch))
# metric column
if not monitor:
monitor = data.columns[-1]
patient_dice_per_epoch.append(data[monitor].values)
# Plot dice per epoch
patient_idx = data[map_column].values
# print(patient_dice_per_epoch)
all_data = np.vstack(patient_dice_per_epoch)
df = pd.DataFrame(all_data, columns=patient_idx)
df.index = epochs
df.index.name = 'epoch'
# df['mean'] = df.mean(axis=1)
df['mean'] = df[[pid for pid in patient_idx]].mean(axis=1)
best_epoch = df['mean'].idxmax()
best_metric = df['mean'].max()
plt.figure(figsize=(10, 8))
df.plot(style=self._markers[:len(patient_idx) + 1], ax=plt.gca())
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.title(
f'Model {self.model_name}' +
f'\nBest Epoch {best_epoch} - Mean {monitor} {best_metric:.6f}')
plt.savefig(self.save_path + '/dice_per_epoch.png')
plt.savefig(self.save_path + '/dice_per_epoch.pdf')
plt.close('all')
# save to csv
df.to_csv(self.save_path + '/dice_per_epoch.csv')
violin_df = df[df.columns[:-1]]
group_df = violin_df.reset_index().melt(
id_vars=violin_df.columns[:-len(patient_idx)],
var_name=map_column, value_name=monitor)
def Q1(x):
return x.quantile(0.25)
def Q3(x):
return x.quantile(0.75)
def to_int(x):
return x.astype(int)
        summary_df = group_df.groupby('epoch').agg(
            {monitor: ['min', Q1, 'median', Q3, 'max', 'mean', 'std']})
        with open(self.save_path + '/val_summary.txt', 'w') as f:
            f.write(str(summary_df))
class PostProcessor:
MODEL_PATH = '/model'
MODEL_NAME = '/model.{epoch:03d}.h5'
BEST_MODEL_PATH = '/best'
PREDICTION_PATH = '/prediction'
PREDICTION_NAME = '/prediction.{epoch:03d}.h5'
LOG_FILE = '/logs.csv'
PERFORMANCE_PATH = '/performance'
TEST_OUTPUT_PATH = '/test'
PREDICT_TEST_NAME = '/prediction_test.h5'
def __init__(self, log_base_path='logs',
temp_base_path='',
analysis_base_path='',
run_test=False, new_dataset_params=None):
self.temp_base_path = temp_base_path
self.log_base_path = log_base_path
self.update_data_reader(new_dataset_params)
try:
model_path = log_base_path + self.MODEL_PATH
model_files = os.listdir(model_path)
self.epochs = [int(filename[-6:-3])
for filename in model_files]
except Exception as e: # pragma: no cover
print('No saved models', e)
warnings.warn('load_best_model does not work')
if len(self.epochs) == 0:
print('No saved models in', model_path)
warnings.warn('load_best_model does not work')
self.run_test = run_test
def update_data_reader(self, new_dataset_params):
model_path = self.log_base_path + self.MODEL_PATH
sample_model_filename = model_path + '/' + os.listdir(model_path)[0]
with h5py.File(sample_model_filename, 'r') as f:
config = f.attrs['deoxys_config']
config = load_json_config(config)
dataset_params = config['dataset_params']
# update until level 2
if new_dataset_params is not None:
for key in new_dataset_params:
if key in dataset_params:
dataset_params[key].update(new_dataset_params[key])
else:
dataset_params[key] = new_dataset_params[key]
self.dataset_filename = dataset_params['config']['filename']
self.data_reader = load_data(dataset_params)
self.dataset_params = dataset_params
def _best_epoch_from_raw_log(self, monitor='', mode='max'):
print(F'Finding best model based on the {mode}imum {monitor} from '
'raw logs')
epochs = self.epochs
if len(epochs) == 0:
print('No saved models in', self.log_base_path)
raise Exception('load_best_model does not work')
logger_path = self.log_base_path + self.LOG_FILE
if os.path.isfile(logger_path):
df = read_csv(logger_path, usecols=['epoch', monitor])
df['epoch'] = df['epoch'] + 1
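            # Assumption: the CSV logger records 0-based epochs while saved model/prediction
            # files are numbered from 1, hence the +1 shift before matching against `epochs`.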
# only compare models that were saved
min_df = df[df['epoch'].isin(epochs)].min()
min_epoch = df[df['epoch'].isin(epochs)].idxmin()
max_df = df[df['epoch'].isin(epochs)].max()
max_epoch = df[df['epoch'].isin(epochs)].idxmax()
if mode == 'min':
val = min_df[monitor]
best_epoch = min_epoch[monitor] + 1
else:
val = max_df[monitor]
best_epoch = max_epoch[monitor] + 1
else:
warnings.warn('No log files to check for best model')
print('Best epoch:', best_epoch, f', with {monitor}={val}')
return best_epoch
def get_best_model(self, monitor='', mode='max',
keep_best_only=True): # pragma: no cover
best_epoch = self._best_epoch_from_raw_log(monitor, mode)
epochs = self.epochs
for epoch in epochs:
if epoch == best_epoch or not keep_best_only:
shutil.copy(
self.temp_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
self.log_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch))
return self.log_base_path + self.MODEL_PATH + \
self.MODEL_NAME.format(epoch=best_epoch)
class SegmentationPostProcessor(PostProcessor):
MODEL_PATH = '/model'
MODEL_NAME = '/model.{epoch:03d}.h5'
BEST_MODEL_PATH = '/best'
PREDICTION_PATH = '/prediction'
PREDICTION_NAME = '/prediction.{epoch:03d}.h5'
LOG_FILE = '/logs.csv'
PERFORMANCE_PATH = '/performance'
PREDICTED_IMAGE_PATH = '/images'
TEST_OUTPUT_PATH = '/test'
PREDICT_TEST_NAME = '/prediction_test.h5'
SINGLE_MAP_PATH = '/single_map'
SINGLE_MAP_NAME = '/logs.{epoch:03d}.csv'
MAP_PATH = '/logs'
MAP_NAME = '/logs.{epoch:03d}.csv'
TEST_SINGLE_MAP_NAME = '/single_result.csv'
TEST_MAP_NAME = '/result.csv'
def __init__(self, log_base_path='logs',
temp_base_path='',
analysis_base_path='',
map_meta_data=None, main_meta_data='',
run_test=False, new_dataset_params=None):
self.temp_base_path = temp_base_path
self.log_base_path = log_base_path
self.analysis_base_path = analysis_base_path or log_base_path
if not os.path.exists(self.analysis_base_path):
os.mkdir(self.analysis_base_path)
if not os.path.exists(self.analysis_base_path + self.PREDICTION_PATH):
os.mkdir(self.analysis_base_path + self.PREDICTION_PATH)
self.update_data_reader(new_dataset_params)
try:
temp_prediction_path = temp_base_path + self.PREDICTION_PATH
predicted_files = os.listdir(temp_prediction_path)
self.epochs = [int(filename[-6:-3])
for filename in predicted_files]
except Exception as e: # pragma: no cover
print("Error while getting epochs by temp folder:", e)
print("Using post-process log files as alternative")
try:
log_files = os.listdir(self.log_base_path + self.MAP_PATH)
self.epochs = [int(filename[-7:-4])
for filename in log_files]
except Exception as e:
print("Error while getting epochs by log files:", e)
print("Using dummy epochs as alternative.")
self.epochs = [5]
print("Post-process only works on test data.")
if map_meta_data:
if type(map_meta_data) == str:
self.map_meta_data = map_meta_data.split(',')
else:
self.map_meta_data = map_meta_data
else:
self.map_meta_data = ['patient_idx', 'slice_idx']
if main_meta_data:
self.main_meta_data = main_meta_data
else:
self.main_meta_data = self.map_meta_data[0]
self.run_test = run_test
# def update_data_reader(self, new_dataset_params):
# model_path = self.log_base_path + self.MODEL_PATH
# sample_model_filename = model_path + '/' + os.listdir(model_path)[0]
# with h5py.File(sample_model_filename, 'r') as f:
# config = f.attrs['deoxys_config']
# config = load_json_config(config)
# dataset_params = config['dataset_params']
# # update until level 2
# if new_dataset_params is not None:
# for key in new_dataset_params:
# if key in dataset_params:
# dataset_params[key].update(new_dataset_params[key])
# else:
# dataset_params[key] = new_dataset_params[key]
# self.dataset_filename = dataset_params['config']['filename']
# self.data_reader = load_data(dataset_params)
# self.dataset_params = dataset_params
def map_2d_meta_data(self):
print('mapping 2d meta data')
if not self.run_test:
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
if not os.path.exists(map_folder):
os.makedirs(map_folder)
map_filename = map_folder + self.SINGLE_MAP_NAME
for epoch in self.epochs:
H5MetaDataMapping(
ref_file=self.dataset_filename,
save_file=map_filename.format(epoch=epoch),
folds=self.data_reader.val_folds,
fold_prefix='',
dataset_names=self.map_meta_data).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
if not os.path.exists(test_folder):
os.makedirs(test_folder)
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
H5MetaDataMapping(
ref_file=self.dataset_filename,
save_file=map_filename,
folds=self.data_reader.test_folds,
fold_prefix='',
dataset_names=self.map_meta_data).post_process()
return self
def calculate_fscore_single(self):
if not self.run_test:
print('calculating dice score per items in val set')
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
map_filename = map_folder + self.SINGLE_MAP_NAME
for epoch in self.epochs:
H5CalculateFScore(
predicted_path.format(epoch=epoch),
map_filename.format(epoch=epoch)
).post_process()
else:
print('calculating dice score per items in test set')
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
H5CalculateFScore(
predicted_path,
map_filename
).post_process()
return self
def calculate_fscore_single_3d(self):
self.calculate_fscore_single()
if not self.run_test:
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
main_log_folder = self.log_base_path + self.MAP_PATH
try:
os.rename(map_folder, main_log_folder)
except Exception as e:
print("Files exist:", e)
print("Copying new logs file")
os.rename(main_log_folder,
main_log_folder + '-' + str(time()))
os.rename(map_folder, main_log_folder)
for epoch in self.epochs:
H5Transform3d(
ref_file=self.temp_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
map_file=main_log_folder +
self.MAP_NAME.format(epoch=epoch),
map_column=self.main_meta_data,
merge_file=self.log_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
try:
os.rename(map_filename, main_result_file_name)
except Exception as e:
print("Files exist:", e)
print("Copying new result file")
os.rename(main_result_file_name,
main_result_file_name + '-' + str(time()) + '.csv')
os.rename(map_filename, main_result_file_name)
H5Transform3d(
ref_file=self.temp_base_path + self.TEST_OUTPUT_PATH +
self.PREDICT_TEST_NAME,
map_file=main_result_file_name,
map_column=self.main_meta_data,
merge_file=test_folder + self.PREDICT_TEST_NAME,
).post_process()
def merge_2d_slice(self):
print('merge 2d slice to 3d images')
if not self.run_test:
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
map_filename = map_folder + self.SINGLE_MAP_NAME
merge_path = self.log_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
if not os.path.exists(main_log_folder):
os.makedirs(main_log_folder)
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5Merge2dSlice(
predicted_path.format(epoch=epoch),
map_filename.format(epoch=epoch),
self.main_meta_data,
merge_path.format(epoch=epoch),
main_log_filename.format(epoch=epoch)
).post_process()
else:
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
H5Merge2dSlice(
predicted_path,
map_filename,
self.main_meta_data,
merge_path,
main_result_file_name
).post_process()
return self
def merge_3d_patches(self): # pragma: no cover
print('merge 3d patches to 3d images')
if not self.run_test:
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
# map_folder = self.log_base_path + self.SINGLE_MAP_PATH
# map_filename = map_folder + self.SINGLE_MAP_NAME
merge_path = self.analysis_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
if not os.path.exists(main_log_folder):
os.makedirs(main_log_folder)
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5MergePatches(
ref_file=self.dataset_filename,
predicted_file=predicted_path.format(epoch=epoch),
map_column=self.main_meta_data,
merge_file=merge_path.format(epoch=epoch),
save_file=main_log_filename.format(epoch=epoch),
patch_size=self.data_reader.patch_size,
overlap=self.data_reader.overlap,
folds=self.data_reader.val_folds,
fold_prefix='',
original_input_dataset=self.data_reader.x_name,
original_target_dataset=self.data_reader.y_name,
).post_process()
else:
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
if not os.path.exists(test_folder):
os.makedirs(test_folder)
H5MergePatches(
ref_file=self.dataset_filename,
predicted_file=predicted_path,
map_column=self.main_meta_data,
merge_file=merge_path,
save_file=main_result_file_name,
patch_size=self.data_reader.patch_size,
overlap=self.data_reader.overlap,
folds=self.data_reader.test_folds,
fold_prefix='',
original_input_dataset=self.data_reader.x_name,
original_target_dataset=self.data_reader.y_name,
).post_process()
return self
def calculate_fscore(self):
print('calculating dice score per 3d image')
if not self.run_test:
merge_path = self.analysis_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5CalculateFScore(
merge_path.format(epoch=epoch),
main_log_filename.format(epoch=epoch),
map_file=main_log_filename.format(epoch=epoch),
map_column=self.main_meta_data
).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
H5CalculateFScore(
merge_path,
main_result_file_name,
map_file=main_result_file_name,
map_column=self.main_meta_data
).post_process()
return self
def get_best_model(self, monitor='', mode='max', keep_best_only=True,
use_raw_log=False):
print('finding best model')
epochs = self.epochs
if use_raw_log:
best_epoch = self._best_epoch_from_raw_log(monitor, mode)
else:
res_df = | pd.DataFrame(epochs, columns=['epochs']) | pandas.DataFrame |
#!/usr/bin/env python3
import pandas
import re
def sanitize(string):
string = string.lower()
string = re.sub(r' ','_',string)
string = re.sub(r'[^a-zA-Z_]','',string)
return string
class BptImporter():
playName = ''
dirtyData = ''
headers = []
cleanedData = pandas.DataFrame()
def __init__(self, name):
self.playName = name
def importData(self, path):
self.dirtyData = open(path,'r').read()
self.dirtyData = self.dirtyData.split('\n')
self.dirtyData = self.dirtyData[4:] # Top four lines are junk
self.dirtyData = [line.split('\t') for line in self.dirtyData]
self.headers = self.dirtyData[1]
self.dirtyData = [line for line in self.dirtyData if len(line[0]) > 0]
self.dirtyData = [line for line in self.dirtyData if line != self.headers]
self.dirtyData = [line for line in self.dirtyData if line[0] != 'None']
def processData(self):
sectionHeads = [idx for idx, x in enumerate(self.dirtyData) if len(x) == 1]
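        # Rows with a single column are section headers naming a ticket class; the data rows
        # that follow (up to the next header) are assigned that class.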
for idx in range(len(sectionHeads)):
if len(self.dirtyData[sectionHeads[idx] + 1]) == 1:
pass
else:
groupTicketClass = sanitize(self.dirtyData[sectionHeads[idx]][0])
groupStart = sectionHeads[idx] + 1
if idx != (len(sectionHeads) - 1):
groupEnd = sectionHeads[idx+1] - 1
else: # End of data
groupEnd = len(self.dirtyData) - 1
df = pandas.DataFrame(self.dirtyData[groupStart:groupEnd], columns=self.headers)
df['ticket_purchase_type'] = groupTicketClass
self.cleanedData = | pandas.concat((self.cleanedData,df), ignore_index=True) | pandas.concat |
import os
import pandas as pd
import glob
import nibabel as nib
from nilearn.image import resample_img
import numpy as np
from sklearn.model_selection import StratifiedKFold
# split data train, validate
def generate_csv(options):
# order ISBI dataset
train_csv_path = options["train_csv_path"]
modalities_names = options['preprocess_x_names']
modalities = options['modalities']
masks_names = options['preprocess_y_names']
masks = options['masks']
# generate csv file of files names and split dataset
_, dirs, _ = next(os.walk(options['train_folder']))
if os.path.isfile(train_csv_path):
os.remove(train_csv_path)
train_data = pd.DataFrame(columns=['root_path', 'patient_id', 'study', *masks, *modalities, 'fold'])
train_data = train_data.astype({"study": str})
for dir_ in dirs:
patient_id = dir_.split('_')[0]
study = "_"+dir_.split('_')[1]
root_path = os.path.join(options['train_folder'], dir_, options['tmp_folder'])
df = pd.DataFrame([[root_path, patient_id, study, *masks_names, *modalities_names, 1]], columns=['root_path', 'patient_id', 'study', *masks, *modalities, 'fold'])
        train_data = pd.concat([train_data, df])
train_data.reset_index(inplace=True)
train_data.drop(columns=['index'], inplace=True)
train_data.to_csv(train_csv_path, index=False)
return train_data
def miccai_generate_csv(options):
# order miccai dataset
train_csv_path = options["train_csv_path"]
modalities_names = options['preprocess_x_names']
modalities = options['modalities']
masks_names = options['preprocess_y_names']
masks = options['masks']
# generate csv file of files names and split dataset
_, dirs, _ = next(os.walk(options['train_folder']))
if os.path.isfile(train_csv_path):
os.remove(train_csv_path)
train_data = pd.DataFrame(columns=['root_path', 'center_id', 'patient', *masks, *modalities, 'fold'])
for dir_ in dirs:
center_id = '_'.join(dir_.split('_')[:2])
patient = '_'.join(dir_.split('_')[2:])
root_path = os.path.join(options['train_folder'], dir_, options['tmp_folder'])
df = pd.DataFrame([[root_path, center_id, patient, *masks_names, *modalities_names, 1]], columns=['root_path', 'center_id', 'patient', *masks, *modalities, 'fold'])
        train_data = pd.concat([train_data, df])
train_data.reset_index(inplace=True)
train_data.drop(columns=['index'], inplace=True)
train_data.to_csv(train_csv_path, index=False)
return train_data
def resize_images(options):
# resize images just for test as image large remove it later
root, dirs, _ = next(os.walk(options['train_folder']))
for dir_ in dirs:
files = glob.glob(os.path.join(root, dir_, options['tmp_folder'])+'/*.nii.gz')
print(files)
for file in files:
# resize
data = nib.load(file)
data = resample_img(data, target_affine=np.eye(3)*2., interpolation='nearest')
# save new size
nib.save(data, file)
def split_folds(train_csv_path, seed=300, k_fold=5):
df = pd.read_csv(train_csv_path)
skf = StratifiedKFold(n_splits=k_fold, random_state=seed, shuffle=True)
for i, (train_index, val_index) in enumerate(
skf.split(df, df["patient_id"])
):
df.loc[val_index, "fold"] = i
df.reset_index(inplace=True)
df.drop(columns=['index'], inplace=True)
df.to_csv(train_csv_path, index=False)
def miccai_split_folds(train_csv_path, seed=300, k_fold=5):
df = | pd.read_csv(train_csv_path) | pandas.read_csv |
import pytest
import pandas as pd
import datetime as dt
import goldenowl.portfolio.holding as hd
import goldenowl.portfolio.portfolio as pf
import goldenowl.asset.asset as at
def get_prdata():
price_dict_raw ={
'1992-11-23': 234.2,
'1999-01-23': 34.2,
'2000-10-03': 134.2,
'2001-11-01': 333.9,
'2002-11-23': 234.2,
'2005-05-2': 100,
'2012-01-23': 4000.0,
};
price_dict = {pd.to_datetime(key):val for key, val in price_dict_raw.items()}
tup = list(zip(price_dict.keys(), price_dict.values()));
data = pd.DataFrame(tup, columns=['Date', 'Close']);
data['Open'] = data['High'] = data['Low'] = 0;
return data;
def test_hedge():
asset1 = at.Asset('Asset1', get_prdata());
asset2 = at.Asset('Asset2', get_prdata());
asset_ratio_list = [(asset1, 0.4), (asset2, 0.6)]
prtf = pf.Portfolio('TestP', asset_ratio_list);
prtf.addAmount(200, '1992-11-23');
prtf.setLongPutHedge(asset1, 0.146, 0.01, dt.timedelta(days =36000), '1992-12-01');
val_start = prtf.getValue('1992-12-23');
hldng1 =hd.Holding('Test1', asset1);
hldng2 =hd.Holding('Test2', asset2);
hldng1.buyAmount(200*0.4*0.99, '1992-11-23');
hldng2.buyAmount(200*0.6*0.99, '1992-11-23');
valh1 = hldng1.getValue('2012-01-23');
valh2 = hldng2.getValue('2012-01-23');
val = prtf.getValue('2012-01-23');
assert (0.99 * val) == pytest.approx(valh1+ valh2, 0.01), "hedge OTM failed"
val_low = prtf.getValue('2005-06-1');
assert (val_low) == pytest.approx((200/234.2)*val_start, 0.01), "hedge ITM failed"
def test_addAmount():
asset1 = at.Asset('Asset1', get_prdata());
asset2 = at.Asset('Asset2', get_prdata());
asset_ratio_list = [(asset1, 0.4), (asset2, 0.6)]
prtf = pf.Portfolio('TestP', asset_ratio_list);
hldng1 =hd.Holding('Test1', asset1);
hldng2 =hd.Holding('Test2', asset2);
prtf.addAmount(200, '1992-11-23');
val = prtf.getValue('2012-01-23');
hldng1.buyAmount(200*0.4, '1992-11-23');
hldng2.buyAmount(200*0.6, '1992-11-23');
valh1 = hldng1.getValue('2012-01-23');
valh2 = hldng2.getValue('2012-01-23');
assert (val) == (valh1+ valh2), "addAmount failed"
def test_removeAmount():
asset1 = at.Asset('Asset1', get_prdata());
asset2 = at.Asset('Asset2', get_prdata());
asset_ratio_list = [(asset1, 0.4), (asset2, 0.6)]
prtf = pf.Portfolio('TestP', asset_ratio_list);
hldng1 =hd.Holding('Test1', asset1);
hldng2 =hd.Holding('Test2', asset2);
prtf.addAmount(200, '1992-11-23');
prtf.removeAmount(100, '2000-10-03');
val = prtf.getValue('2012-01-23');
hldng1.buyAmount(200*0.4, '1992-11-23');
hldng2.buyAmount(200*0.6, '1992-11-23');
hldng1.sellAmount(100*0.4, '2000-10-03');
hldng2.sellAmount(100*0.6, '2000-10-03');
valh1 = hldng1.getValue('2012-01-23');
valh2 = hldng2.getValue('2012-01-23');
assert (val) == (valh1+ valh2), "removeAmount failed"
def test_rebalance():
asset1 = at.Asset('Asset1', get_prdata());
pr_data = get_prdata();
pr_data.loc[pr_data.Date == pd.to_datetime("2000-10-03"), ['Close']] = 200;
pr_data.loc[pr_data.Date == pd.to_datetime("2012-01-23"), ['Close']] = 3000;
asset2 = at.Asset('Asset2', pr_data);
asset_ratio_list = [(asset1, 0.4), (asset2, 0.6)]
prtf = pf.Portfolio('TestP', asset_ratio_list);
prtf.addAmount(200, '1992-11-23');
prtf.removeAmount(100, '2000-10-03');
final_val = prtf.getValue('2012-01-23');
val_bef_rebalance = prtf.getValue('2000-10-03');
prtf.rebalance('2000-10-03');
hldng1 =hd.Holding('Test1', asset1);
hldng2 =hd.Holding('Test2', asset2);
hldng1.buyAmount(val_bef_rebalance*0.4, '2000-10-03');
hldng2.buyAmount(val_bef_rebalance*0.6, '2000-10-03');
hldval1= hldng1.getValue('2012-01-23');
hldval2= hldng2.getValue('2012-01-23');
final_val_rb = prtf.getValue('2012-01-23');
assert (final_val_rb) == pytest.approx(hldval1 + hldval2,0.1), "post rebalance tally failed"
assert (final_val_rb) != pytest.approx(final_val, 0.1), "post and pre rebalance same"
def test_XIRR():
asset1 = at.Asset('Asset1', get_prdata());
pr_data = get_prdata();
pr_data.loc[pr_data.Date == pd.to_datetime("2000-10-03"), ['Close']] = 200;
pr_data.loc[pr_data.Date == | pd.to_datetime("2012-01-23") | pandas.to_datetime |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import (
EssentialFeatureMetabase,
EssentialSampleMetabase,
)
from pmaf.biome.essentials._base import EssentialBackboneBase
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Callable, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class FrequencyTable(
EssentialBackboneBase, EssentialFeatureMetabase, EssentialSampleMetabase
):
"""An essential class for handling frequency data."""
def __init__(
self,
frequency: Union[pd.DataFrame, str],
skipcols: Union[Sequence[Union[str, int]], str, int] = None,
allow_nan: bool = False,
**kwargs
):
"""Constructor for :class:`.FrequencyTable`
Parameters
----------
frequency
Data containing frequency data.
skipcols
Columns to skip when processing data.
allow_nan
Allow NA/NaN values or raise an error.
kwargs
Remaining parameters passed to :func:`~pandas.read_csv` or :mod:`biom` loader
"""
self.__internal_frequency = None
tmp_skipcols = np.asarray([])
tmp_metadata = kwargs.pop("metadata", {})
if skipcols is not None:
if isinstance(skipcols, (str, int)):
tmp_skipcols = np.asarray([skipcols])
elif isinstance(skipcols, (list, tuple)):
if not isinstance(skipcols[0], (str, int)):
tmp_skipcols = np.asarray(skipcols)
else:
raise TypeError(
"`skipcols` can be int/str or list-like of int/str."
)
else:
raise TypeError("`skipcols` can be int/str or list-like of int/str.")
if isinstance(frequency, pd.DataFrame):
if all(frequency.shape):
tmp_frequency = frequency
else:
raise ValueError("Provided `frequency` Datafame is invalid.")
elif isinstance(frequency, str):
if not path.isfile(frequency):
raise FileNotFoundError("Provided `frequency` file path is invalid.")
file_extension = path.splitext(frequency)[-1].lower()
if file_extension in [".csv", ".tsv"]:
tmp_frequency = pd.read_csv(frequency, **kwargs)
elif file_extension in [".biom", ".biome"]:
tmp_frequency, new_metadata = self.__load_biom(frequency, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise TypeError("Provided `frequency` has invalid type.")
if skipcols is not None:
if np.issubdtype(tmp_skipcols.dtype, np.number):
if tmp_frequency.columns.isin(tmp_skipcols).any():
tmp_frequency.drop(columns=tmp_skipcols, inplace=True)
else:
tmp_frequency.drop(
columns=tmp_frequency.columns[tmp_skipcols], inplace=True
)
else:
tmp_frequency.drop(columns=tmp_skipcols, inplace=True)
tmp_dtypes = list(set(tmp_frequency.dtypes.values))
if len(tmp_dtypes) == 1 and pd.api.types.is_numeric_dtype(tmp_dtypes[0]):
self.__init_frequency_table(tmp_frequency)
else:
if not allow_nan:
raise ValueError(
"Provided `frequency` must have numeric dtypes. "
"Use `allow_nan` to allow missing values."
)
if len(tmp_dtypes) == 1 and pd.api.types.is_numeric_dtype(tmp_dtypes[0]):
self.__init_frequency_table(tmp_frequency)
elif len(tmp_dtypes) == 2:
tmp_dtypes_cond = [
(dt == object) or ( | pd.api.types.is_numeric_dtype(dt) | pandas.api.types.is_numeric_dtype |
import json
import os
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
import xarray as xr
from cate.core.workflow import Workflow, OpStep
from cate.core.workspace import Workspace, WorkspaceError, mk_op_arg, mk_op_args, mk_op_kwargs
from cate.util.undefined import UNDEFINED
from cate.util.opmetainf import OpMetaInfo
NETCDF_TEST_FILE_1 = os.path.join(os.path.dirname(__file__), '..', 'data', 'precip_and_temp.nc')
NETCDF_TEST_FILE_2 = os.path.join(os.path.dirname(__file__), '..', 'data', 'precip_and_temp_2.nc')
class WorkspaceTest(unittest.TestCase):
def test_utilities(self):
self.assertEqual(mk_op_arg(1), {'value': 1})
self.assertEqual(mk_op_arg('2'), {'value': '2'})
self.assertEqual(mk_op_arg('a'), {'value': 'a'})
self.assertEqual(mk_op_arg('@b'), {'source': 'b'})
self.assertEqual(mk_op_args(), [])
self.assertEqual(mk_op_args(1, '2', 'a', '@b'), [{'value': 1}, {'value': '2'}, {'value': 'a'}, {'source': 'b'}])
self.assertEqual(mk_op_kwargs(a=1), OrderedDict([('a', {'value': 1})]))
self.assertEqual(mk_op_kwargs(a=1, b='@c'), OrderedDict([('a', {'value': 1}), ('b', {'source': 'c'})]))
def test_workspace_is_part_of_context(self):
def some_op(ctx: dict) -> dict:
return dict(ctx)
from cate.core.op import OP_REGISTRY
try:
op_reg = OP_REGISTRY.add_op(some_op)
op_reg.op_meta_info.inputs['ctx']['context'] = True
ws = Workspace('/path', Workflow(OpMetaInfo('workspace_workflow', header=dict(description='Test!'))))
ws.set_resource(op_reg.op_meta_info.qualified_name, {}, res_name='new_ctx')
ws.execute_workflow('new_ctx')
self.assertTrue('new_ctx' in ws.resource_cache)
self.assertTrue('workspace' in ws.resource_cache['new_ctx'])
self.assertIs(ws.resource_cache['new_ctx']['workspace'], ws)
finally:
OP_REGISTRY.remove_op(some_op)
def test_workspace_can_create_new_res_names(self):
ws = Workspace('/path', Workflow(OpMetaInfo('workspace_workflow', header=dict(description='Test!'))))
res_name_1 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='A'))
res_name_2 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='B'))
res_name_3 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='C'))
self.assertEqual(res_name_1, 'res_1')
self.assertEqual(res_name_2, 'res_2')
self.assertEqual(res_name_3, 'res_3')
self.assertIsNotNone(ws.workflow.find_node(res_name_1))
self.assertIsNotNone(ws.workflow.find_node(res_name_2))
self.assertIsNotNone(ws.workflow.find_node(res_name_3))
def test_to_json_dict(self):
def dataset_op() -> xr.Dataset:
periods = 5
temperature_data = (15 + 8 * np.random.randn(periods, 2, 2)).round(decimals=1)
temperature_attrs = {'a': np.array([1, 2, 3]), 'comment': 'hot', '_FillValue': np.nan}
precipitation_data = (10 * np.random.rand(periods, 2, 2)).round(decimals=1)
precipitation_attrs = {'x': True, 'comment': 'wet', '_FillValue': -1.0}
ds = xr.Dataset(
data_vars={
'temperature': (('time', 'lat', 'lon'), temperature_data, temperature_attrs),
'precipitation': (('time', 'lat', 'lon'), precipitation_data, precipitation_attrs)
},
coords={
'lon': np.array([12, 13]),
'lat': np.array([50, 51]),
'time': pd.date_range('2014-09-06', periods=periods)
},
attrs={
'history': 'a b c'
})
return ds
def data_frame_op() -> pd.DataFrame:
data = {'A': [1, 2, 3, np.nan, 4, 9, np.nan, np.nan, 1, 0, 4, 6],
'B': [5, 6, 8, 7, 5, 5, 5, 9, 1, 2, 7, 6]}
time = | pd.date_range('2000-01-01', freq='MS', periods=12) | pandas.date_range |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from config_fh import get_db_engine, get_db_session, get_cache_file_path, STR_FORMAT_DATE
from fh_tools.fh_utils import return_risk_analysis, str_2_date
from fh_tools import fh_utils
import matplotlib.pyplot as plt # pycharm 需要通过现实调用 plt.show 才能显示plot
from datetime import date, datetime, timedelta
from sqlalchemy.types import String, Date, FLOAT
import datetime as dt
import logging
logger = logging.getLogger()
STRATEGY_TYPE_CN_EN_DIC = {'债券策略': 'fixed_income',
'套利策略': 'arbitrage',
'管理期货策略': 'cta',
'股票多头策略': 'long_only',
'阿尔法策略': 'alpha',
'宏观策略': 'macro',
'组合基金策略': 'fof'}
STRATEGY_TYPE_EN_CN_DIC = {en: cn for cn, en in STRATEGY_TYPE_CN_EN_DIC.items()}
def calc_wind_code_list_index(wind_code_list, date_since, file_name=None):
"""
计算 wind_code_list 组成的指数
:param wind_code_list:
:param date_since:
:param file_name: 默认为None,不生成文件
:return: 合成后的指数每日收益率列表
"""
# 获取样本子基金行情数据
wind_code_list_str = ', '.join(["'" + wind_code + "'" for wind_code in wind_code_list])
query_base_str = r'''select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (%s)
group by wind_code, nav_date_week
) as ffv,
fund_nav fv
where ffv.nav_date_week >= %s
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc'''
query_str = query_base_str % (wind_code_list_str, date_since)
# logger.info(query_str)
engine = get_db_engine()
fund_nav_df = | pd.read_sql_query(query_str, engine) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
"""
Created on Sat May 22 14:41:01 2021
@author: Thomas
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy import stats
################## Calibration Data ###########################
CalibrationFolder = 'data/Calibration' # Path to calibation data
pivFolder = "data/piv_data2"
save_fig = False
piv_files = [pivFolder + "/" + file for file in os.listdir(pivFolder)]
files = os.listdir(CalibrationFolder) # File names in calibration folder
CalibrationData = pd.DataFrame({'X_Value': [], 'Voltage': [], 'U': []}) # Empty dataframe
U_list = np.linspace(0, 20, 11)
for file in files[1:]: # Loop over files in folder, read file, specify velocity in df, concat to main df
FilePath = CalibrationFolder + '/' + file # Create path
df = pd.read_csv(FilePath, sep=('\t'), skiprows=(22), usecols=[0, 1])
df['U'] = float(file[-2:])
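    # Drop outlier samples: keep only readings within 3 standard deviations of the mean voltage.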
df = df[(np.abs(stats.zscore(df["Voltage"])) < 3)]
CalibrationData = pd.concat([CalibrationData, df], ignore_index=True)
# Obtain polyfit coefficients 4th order
coef = np.polyfit(CalibrationData['Voltage'], CalibrationData["U"], 4)
# plot polyfit
voltages = np.linspace(1.4, 1.9, 100)
fig, ax = plt.subplots(1, 1, constrained_layout=True, dpi=150)
ax.plot(np.polyval(coef, voltages), voltages, label=r"$4^{th}$-order polynomial fit", c='C00')
ax.scatter(CalibrationData["U"][::10000], CalibrationData['Voltage'][::10000], label="measurements", c='C01', zorder=1)
ax.grid()
ax.legend(prop={"size": 14})
ax.set_ylabel("E [V]", fontsize=14)
ax.set_xlabel("U [m/s]", fontsize=14)
plt.savefig('HWA_Calibration.png', bbox_inches='tight') if save_fig else None
#################### 0 AOA ###############################
ZeroAOAFolder = 'data/0 aoa'
files = os.listdir(ZeroAOAFolder)
# List of measurement heights
HeightList = np.arange(-40, 44, 4)
# create empty df
ZeroAoAData = pd.DataFrame({'X_Value': [], 'Voltage': [], 'height': []})
# read all files and append to single df
for file in files:
FilePath = ZeroAOAFolder + '/' + file
df = pd.read_csv(FilePath, sep=('\t'), skiprows=(22), usecols=[0, 1])
df['height'] = float(file[12:15]) # define height from specific data file
df = df[(np.abs(stats.zscore(df["Voltage"])) < 3)]
ZeroAoAData = | pd.concat([ZeroAoAData, df], ignore_index=True) | pandas.concat |
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import os
import sys
import json
import traceback
import logging
from time import time
from elasticsearch import Elasticsearch
MAX_RESOLUTION = 1000
MAX_TOLERABLE_BOREDOM = 100
MIN_TOLERABLE_LENGTH = 1
REDOM_SET_SIZE = 1
BOREDOM_SET_SIZE = 1
FULL_DURATION = 86400
CONSENSUS = 6
STALE_PERIOD = 500
ALGORITHMS = [
'first_hour_average',
'mean_subtraction_cumulation',
'stddev_from_average',
'stddev_from_moving_average',
'least_squares',
'grubbs',
'histogram_bins',
'median_absolute_deviation',
'ks_test',
]
logger = logging.getLogger("AnalyzerLog")
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
test_statistic = demedianed.iget(-1) / median_deviation
    # Completely arbitrary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
def grubbs(timeseries):
"""
A timeseries is anomalous if the Z score is greater than the Grubb's score.
"""
series = scipy.array([x[1] for x in timeseries])
stdDev = scipy.std(series)
mean = np.mean(series)
tail_average = tail_avg(timeseries)
z_score = (tail_average - mean) / stdDev
len_series = len(series)
threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
threshold_squared = threshold * threshold
grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
return z_score > grubbs_score
def first_hour_average(timeseries):
"""
    Calculate the simple average over one hour, FULL_DURATION seconds ago.
A timeseries is anomalous if the average of the last three datapoints
are outside of three standard deviations of this value.
"""
last_hour_threshold = time() - (FULL_DURATION - 3600)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
    three datapoints minus the moving average is greater than three standard
deviations of the average. This does not exponentially weight the MA and so
is better for detecting anomalies with respect to the entire series.
"""
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_moving_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
    three datapoints minus the moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to the short term trends.
"""
series = | pandas.Series([x[1] for x in timeseries]) | pandas.Series |
import os
from zhihu_user_info_spider.util.Utils import Util
import pandas as pd
import threading
from zhihu_user_info_spider.Exception.SpiderException import SpiderException
# Utility class for the save methods. Main purpose: read the JSON file and save the data using the method specified in the configuration file
class SaveUtil(Util):
question_list_model = 1
user_uuid_list_model = 3
# answer_list_model = 2
HOT_LIST = 11
USER_ID_LIST = 12
PD_DF = 13
def __init__(self):
super().__init__()
self.path = os.path.dirname(os.path.dirname(self.abs_path)) + os.sep + "result" + os.sep
self.update_date()
self.hot_path = self.path + "hotList" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
self.user_path = self.path + "userUUID" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
self.final_path = self.path + "userInfo" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
self.lock = threading.RLock()
    # Used to update the paths
def get_paths(self):
self.update_date()
self.hot_path = self.path + "hotList" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
self.user_path = self.path + "userUUID" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
self.final_path = self.path + "userInfo" + os.sep + self.year + os.sep + self.month + os.sep + self.day + os.sep
    # Used to save intermediate products
def middle_save(self, model: int, data: list, attach=False):
file_name = ""
self.get_paths()
if model == self.question_list_model:
flag = os.path.exists(self.hot_path)
if not flag:
os.makedirs(self.hot_path)
file_name = "hot_list-" + self.year + "-" + self.month + "-" + self.day + ".txt"
f_w = open(self.hot_path + file_name, mode="w", encoding="utf-8")
for i in data:
f_w.write(str(i) + "\n")
f_w.close()
elif model == self.user_uuid_list_model:
flag = os.path.exists(self.user_path)
if not flag:
os.makedirs(self.user_path)
file_name = "user_uuid-" + self.year + "-" + self.month + "-" + self.day + ".txt"
if os.path.exists(self.user_path + file_name):
f_w = open(self.user_path + file_name, mode="a", encoding="utf-8")
elif attach:
f_w = open(self.user_path + file_name, mode="a", encoding="utf-8")
else:
f_w = open(self.user_path + file_name, mode="w", encoding="utf-8")
for i in data:
f_w.write(str(i) + "\n")
f_w.close()
    # This method is used to restore the data of the intermediate products.
    # If it is called for the hot list, it should be called once before midnight every day to avoid path errors.
def restore_middle_data(self, file_type: int):
data_list = []
self.get_paths()
if file_type == self.HOT_LIST:
for root, dirs, files in os.walk(self.hot_path):
if len(files) == 0:
raise SpiderException("未找到当日的hot_list【热榜问题】文件")
for f in files:
with open(os.path.join(root, f), mode="r", encoding="utf-8") as f_r:
for i in f_r.readlines():
data_list.append(i.strip("\n"))
if file_type == self.USER_ID_LIST:
for root, dirs, files in os.walk(self.user_path):
if len(files) == 0:
raise SpiderException("未找到当日的user_uuid【用户uuid】文件")
for f in files:
with open(os.path.join(root, f), mode="r", encoding="utf-8") as f_r:
for i in f_r.readlines():
data_list.append(i.strip("\n"))
return data_list
def save(self, data_dict, is_month=False):
self.get_paths()
path_flag = os.path.exists(self.final_path)
if not path_flag:
os.makedirs(self.final_path)
if "save_method" in self.json_result:
save_method = self.json_result["save_method"]
if save_method == "csv":
self.__save_by_csv(data_dict, is_month)
elif save_method == "txt":
self.__save_by_txt(data_dict)
else:
print("请在util_content.json中配置save_method。")
    # Save using CSV
def __save_by_csv(self, data_dict, is_month=False):
self.get_paths()
df = | pd.DataFrame(data_dict) | pandas.DataFrame |
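# Hypothetical usage sketch (not part of the original class): middle_save() persists the
# intermediate question/user-uuid lists, restore_middle_data() reads them back, and save()
# writes the final user-info records using the save_method configured in util_content.json.
# The field names passed to save() below are made up for illustration.
# util = SaveUtil()
# util.middle_save(SaveUtil.question_list_model, ['question-url-1', 'question-url-2'])
# user_uuids = util.restore_middle_data(SaveUtil.USER_ID_LIST)
# util.save({'uuid': user_uuids, 'follower_count': [0] * len(user_uuids)})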
# execute simulation script with ALL solver setting combinations
######################################################################################
# very important: linSol, nonLinSolIter, solAlg must be their corresponding integers #
# #
# linSol: Dense == 1 GMRES == 6 BiCGStab == 7 SPTFQMR == 8 KLU == 9 #
# nonLinSolIter: Functional == 1 Newton-type == 2 #
# solAlg: Adams == 1 BDF == 2 #
# #
######################################################################################
import logging
import os
import pandas as pd
import numpy as np
from C import DIR_MODELS_AMICI_FINAL, DIR_MODELS_COPASI_FINAL, \
DIR_RESULTS_ALGORITHMS, \
DIR_BASE, DIR_MODELS, SIMCONFIG, DIR_COPASI_BIN
from simulation_wrapper_amici import simulation_wrapper as simulate_with_amici
from simulation_wrapper_copasi import simulation_wrapper as simulate_with_copasi
# create logger object
logger = logging.getLogger()
# initialize the log settings
logging.basicConfig(
filename=os.path.join(DIR_BASE, 'trajectoryComparison.log'),
level=logging.DEBUG)
def run_study_amici(model_info):
# the settings we want to simulate with AMICI
settings_amici = [
{
'id': f'atol_{atol}__rtol_{rtol}__linSol_{ls}__nonlinSol_{nls}__solAlg_{algo}',
'atol': float(atol), 'rtol': float(rtol),
'linSol': ls, 'nonlinSol': nls, 'solAlg': algo}
for (atol, rtol) in (('1e-8', '1e-6'), ('1e-6', '1e-8'),
('1e-12', '1e-10'), ('1e-10', '1e-12'),
('1e-16', '1e-8'), ('1e-8', '1e-16'),
('1e-14', '1e-14'))
for ls in (1, 6, 7, 8, 9)
for nls in (1, 2)
for algo in (1, 2)
]
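    # For illustration (not part of the original script): using the integer encodings from the
    # header comment, the first combination above expands to a settings dict like
    #   {'id': 'atol_1e-8__rtol_1e-6__linSol_1__nonlinSol_1__solAlg_1',
    #    'atol': 1e-08, 'rtol': 1e-06, 'linSol': 1, 'nonlinSol': 1, 'solAlg': 1}
    # i.e. Dense linear solver, functional nonlinear iteration, Adams integrator.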
for setting in settings_amici:
# collect results as a list
results = []
for model_name in sorted(os.listdir(DIR_MODELS_AMICI_FINAL)):
# Get information about the current model
model_rows = model_info[model_info['short_id'] == model_name]
idx = model_rows.index.values[0]
models_to_average = sum([acc for acc in list(model_rows['accepted'])])
# run the simulation
result = simulate_with_amici(simulation_mode=SIMCONFIG.CPUTIME,
settings=setting, model_name=model_name)
# save results in a dict
model_result = {'model_name': model_name,
'median_intern': np.median(result['cpu_times_intern']),
'median_extern': np.median(result['cpu_times_extern']),
'failure': result['failure'],
'n_species': model_rows.loc[idx, 'n_species'],
'n_reactions': model_rows.loc[idx, 'n_reactions'],
'n_submodels': models_to_average}
for i_run, runtime in enumerate(result['cpu_times_intern']):
model_result[f'run_{i_run}'] = runtime
            # collect the per-model result; all results are assembled into a DataFrame below
results.append(model_result)
results_df = | pd.DataFrame(results) | pandas.DataFrame |
# coding: utf-8
# <h1 align="center"> Lending Club Loan Analysis </h1> <br>
# ## Company Information:
# Lending Club is a peer-to-peer lending company based in the United States, in which investors provide funds for potential borrowers and investors earn a profit depending on the risk they take (the borrower's credit score). Lending Club provides the "bridge" between investors and borrowers. For more basic information about the company please check out the wikipedia article about the company. <br><br>
#
#
# <a src="https://en.wikipedia.org/wiki/Lending_Club"> Lending Club Information </a>
#
#
#
#
# ## How Lending Club Works?
# <img src="http://echeck.org/wp-content/uploads/2016/12/Showing-how-the-lending-club-works-and-makes-money-1.png"><br><br>
#
#
# ## Outline: <br><br>
# I. Introduction <br>
# a) [General Information](#general_information)<br>
# b) [Similar Distributions](#similar_distributions)<br><br>
#
# II. <b>Good Loans vs Bad Loans</b><br>
# a) [Types of Loans](#types_of_loans)<br>
# b) [Loans issued by Region](#by_region)<br>
# c) [A Deeper Look into Bad Loans](#deeper_bad_loans)<br><br>
#
# III. <b>The Business Perspective</b><br>
# a) [Understanding the Operative side of Business](#operative_side)<br>
# b) [Analysis by Income Category](#income_category) <br><br>
#
# IV. <b>Assessing Risks</b><br>
# a) [Understanding the Risky Side of Business](#risky_side)<br>
# b) [The importance of Credit Scores](#credit_scores)<br>
# c) [What determines a bad loan](#determines_bad_loan)<br>
# d) [Defaulted Loans](#defaulted_loans)
#
# ## References:
# 1) <a src="https://www.kaggle.com/arthurtok/global-religion-1945-2010-plotly-pandas-visuals"> Global Religion 1945-2010: Plotly & Pandas visuals</a> by Anisotropic <br>
# 2) <a src="https://www.kaggle.com/vigilanf/loan-metrics-by-state"> Loan Metrics By State </a> by <NAME><br>
# 3) Hands on Machine Learning by <NAME> <br>
# 4) <a src="https://www.youtube.com/watch?v=oYbVFhK_olY&list=PLSPWNkAMSvv5DKeSVDbEbUKSsK4Z-GgiP"> Deep Learning with Neural Networks and TensorFlow </a> by Sentdex
# # Introduction:
# ## General Information:
# <a id="general_information"></a>
# In[ ]:
# Import our libraries we are going to use for our data analysis.
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Plotly visualizations
from plotly import tools
import plotly.plotly as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# plotly.tools.set_credentials_file(username='AlexanderBach', api_key='o4fx6i1MtEIJQxfWYvU1')
get_ipython().run_line_magic('matplotlib', 'inline')
df = pd.read_csv('../input/loan.csv', low_memory=False)
# Copy of the dataframe
original_df = df.copy()
df.head()
# In[ ]:
df.info()
# In[ ]:
# Replace the name of some columns
df = df.rename(columns={"loan_amnt": "loan_amount", "funded_amnt": "funded_amount", "funded_amnt_inv": "investor_funds",
"int_rate": "interest_rate", "annual_inc": "annual_income"})
# Drop irrelevant columns
df.drop(['id', 'member_id', 'emp_title', 'url', 'desc', 'zip_code', 'title'], axis=1, inplace=True)
# ## Similar Distributions:
# <a id="similar_distributions"></a>
# We will start by exploring the distribution of the loan amounts and see when the amount of loans issued increased significantly. <br>
#
# <h4> What we need to know: </h4> <br>
# <ul>
# <li> Understand what amount was <b>mostly issued</b> to borrowers. </li>
# <li> Which <b>year</b> issued the most loans. </li>
# <li> The distribution of loan amounts is a <b>multinomial distribution </b>.</li>
# </ul>
#
#
#
# <h4> Summary: </h4><br>
# <ul>
# <li> Most of the <b>loans issued</b> were in the range of 10,000 to 20,000 USD. </li>
# <li> The <b>year of 2015</b> was the year when most loans were issued.</li>
# <li> Loans were issued in an <b>incremental manner</b>. (Possibly due to a recovery in the U.S. economy) </li>
# <li> The loan amounts <b>applied for</b> by potential borrowers, the amount <b>issued</b> to the borrowers and the amount <b>funded</b> by investors are similarly distributed, <b>meaning</b> that it is most likely that qualified borrowers are going to get the loan they had applied for. </li>
#
# </ul>
#
#
#
#
# In[ ]:
fig, ax = plt.subplots(1, 3, figsize=(16,5))
loan_amount = df["loan_amount"].values
funded_amount = df["funded_amount"].values
investor_funds = df["investor_funds"].values
sns.distplot(loan_amount, ax=ax[0], color="#F7522F")
ax[0].set_title("Loan Applied by the Borrower", fontsize=14)
sns.distplot(funded_amount, ax=ax[1], color="#2F8FF7")
ax[1].set_title("Amount Funded by the Lender", fontsize=14)
sns.distplot(investor_funds, ax=ax[2], color="#2EAD46")
ax[2].set_title("Total committed by Investors", fontsize=14)
# In[ ]:
# Lets' transform the issue dates by year.
df['issue_d'].head()
dt_series = pd.to_datetime(df['issue_d'])
df['year'] = dt_series.dt.year
# In[ ]:
# The year of 2015 was the year when the highest amount of loans was issued
# This is an indication that the economy is quietly recovering.
plt.figure(figsize=(12,8))
sns.barplot('year', 'loan_amount', data=df, palette='tab10')
plt.title('Issuance of Loans', fontsize=16)
plt.xlabel('Year', fontsize=14)
plt.ylabel('Average loan amount issued', fontsize=14)
# <h1 align="center"> Good Loans vs Bad Loans: </h1>
# <h2>Types of Loans: </h2>
# <a id="types_of_loans"></a>
# <img src="http://strongarticle.com/wp-content/uploads/2017/09/1f42d6e77042d87f3bb6ae171ebbc530.jpg">
# <br><br>
# In this section, we will see the amount of bad loans Lending Club has declared so far. Of course, we have to understand that there are still loans that are at risk of defaulting in the future.
#
# <h4> What we need to know: </h4>
# <ul>
# <li> The amount of bad loans could <b>increase</b> as the days pass by, since we still have a great amount of current loans. </li>
# <li> <b>Average annual income</b> is an important key metric for finding possible opportunities of investments in a specific region. </li>
#
# </ul>
#
# <h4> Summary: </h4>
# <ul>
# <li> Currently, <b>bad loans</b> make up 7.60% of total loans, but remember that we still have <b>current loans</b> which have the risk of becoming bad loans. (So this percentage is subject to change.) </li>
# <li> The <b> NorthEast </b> region seems to be the most attractive in terms of funding loans to borrowers. </li>
# <li> The <b> SouthWest </b> and <b> West</b> regions have experienced a slight increase in the "median income" in the past years. </li>
# <li> <b>Average interest</b> rates have declined since 2012, which might explain the <b>increase in the volume</b> of loans. </li>
# <li> <b>Employment Length</b> tends to be greater in the regions of the <b>SouthWest</b> and <b>West</b></li>
# <li> Clients located in the regions of <b>NorthEast</b> and <b>MidWest</b> have not experienced a drastic increase in debt-to-income(dti) as compared to the other regions. </li>
# </ul>
# In[ ]:
df["loan_status"].value_counts()
# In[ ]:
# Determining the loans that are bad from loan_status column
bad_loan = ["Charged Off", "Default", "Does not meet the credit policy. Status:Charged Off", "In Grace Period",
"Late (16-30 days)", "Late (31-120 days)"]
df['loan_condition'] = np.nan
def loan_condition(status):
if status in bad_loan:
return 'Bad Loan'
else:
return 'Good Loan'
df['loan_condition'] = df['loan_status'].apply(loan_condition)
# In[ ]:
f, ax = plt.subplots(1,2, figsize=(16,8))
colors = ["#3791D7", "#D72626"]
labels ="Good Loans", "Bad Loans"
plt.suptitle('Information on Loan Conditions', fontsize=20)
df["loan_condition"].value_counts().plot.pie(explode=[0,0.25], autopct='%1.2f%%', ax=ax[0], shadow=True, colors=colors,
labels=labels, fontsize=12, startangle=70)
# ax[0].set_title('State of Loan', fontsize=16)
ax[0].set_ylabel('% of Condition of Loans', fontsize=14)
# sns.countplot('loan_condition', data=df, ax=ax[1], palette=colors)
# ax[1].set_title('Condition of Loans', fontsize=20)
# ax[1].set_xticklabels(['Good', 'Bad'], rotation='horizontal')
palette = ["#3791D7", "#E01E1B"]
sns.barplot(x="year", y="loan_amount", hue="loan_condition", data=df, palette=palette, estimator=lambda x: len(x) / len(df) * 100)
ax[1].set(ylabel="(%)")
# <h2> Loans Issued by Region</h2>
# <a id="by_region"></a>
# In this section we want to analyze loans issued by region in order to see regional patterns that will allow us to understand which regions give Lending Club the most business.<br><br>
#
# ## Summary: <br>
# <ul>
# <li> <b> SouthEast</b> , <b>West </b> and <b>NorthEast</b> regions had the highest amount of loans issued. </li>
# <li> <b>West </b> and <b>SouthWest </b> had a rapid increase in debt-to-income starting in 2012. </li>
# <li><b>West </b> and <b>SouthWest </b> had a rapid decrease in interest rates (This might explain the increase in debt to income). </li>
# </ul>
# In[ ]:
df['addr_state'].unique()
# Make a list with each of the regions by state.
west = ['CA', 'OR', 'UT','WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN' ]
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI','MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
def finding_regions(state):
if state in west:
return 'West'
elif state in south_west:
return 'SouthWest'
elif state in south_east:
return 'SouthEast'
elif state in mid_west:
return 'MidWest'
elif state in north_east:
return 'NorthEast'
df['region'] = df['addr_state'].apply(finding_regions)
# In[ ]:
# This code will take the current date and transform it into a year-month format
df['complete_date'] = pd.to_datetime(df['issue_d'])
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates['issue_d'] = [month.to_period('M') for
month in group_dates['complete_date']]
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = group_dates['loan_amount']/1000
df_dates = pd.DataFrame(data=group_dates[['issue_d','region','loan_amount']])
# In[ ]:
plt.style.use('dark_background')
cmap = plt.cm.Set3
by_issued_amount = df_dates.groupby(['issue_d', 'region']).loan_amount.sum()
by_issued_amount.unstack().plot(stacked=False, colormap=cmap, grid=False, legend=True, figsize=(15,6))
plt.title('Loans issued by Region', fontsize=16)
# In[ ]:
employment_length = ['10+ years', '< 1 year', '1 year', '3 years', '8 years', '9 years',
'4 years', '5 years', '6 years', '2 years', '7 years', 'n/a']
# Create a new column and convert emp_length to integers.
lst = [df]
df['emp_length_int'] = np.nan
for col in lst:
col.loc[col['emp_length'] == '10+ years', "emp_length_int"] = 10
col.loc[col['emp_length'] == '9 years', "emp_length_int"] = 9
col.loc[col['emp_length'] == '8 years', "emp_length_int"] = 8
col.loc[col['emp_length'] == '7 years', "emp_length_int"] = 7
col.loc[col['emp_length'] == '6 years', "emp_length_int"] = 6
col.loc[col['emp_length'] == '5 years', "emp_length_int"] = 5
col.loc[col['emp_length'] == '4 years', "emp_length_int"] = 4
col.loc[col['emp_length'] == '3 years', "emp_length_int"] = 3
col.loc[col['emp_length'] == '2 years', "emp_length_int"] = 2
col.loc[col['emp_length'] == '1 year', "emp_length_int"] = 1
col.loc[col['emp_length'] == '< 1 year', "emp_length_int"] = 0.5
col.loc[col['emp_length'] == 'n/a', "emp_length_int"] = 0
# In[ ]:
# Loan issued by Region and by Credit Score grade
# Change the colormap for tomorrow!
sns.set_style('whitegrid')
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
cmap = plt.cm.inferno
by_interest_rate = df.groupby(['year', 'region']).interest_rate.mean()
by_interest_rate.unstack().plot(kind='area', stacked=True, colormap=cmap, grid=False, legend=False, ax=ax1, figsize=(16,12))
ax1.set_title('Average Interest Rate by Region', fontsize=14)
by_employment_length = df.groupby(['year', 'region']).emp_length_int.mean()
by_employment_length.unstack().plot(kind='area', stacked=True, colormap=cmap, grid=False, legend=False, ax=ax2, figsize=(16,12))
ax2.set_title('Average Employment Length by Region', fontsize=14)
# plt.xlabel('Year of Issuance', fontsize=14)
by_dti = df.groupby(['year', 'region']).dti.mean()
by_dti.unstack().plot(kind='area', stacked=True, colormap=cmap, grid=False, legend=False, ax=ax3, figsize=(16,12))
ax3.set_title('Average Debt-to-Income by Region', fontsize=14)
by_income = df.groupby(['year', 'region']).annual_income.mean()
by_income.unstack().plot(kind='area', stacked=True, colormap=cmap, grid=False, ax=ax4, figsize=(16,12))
ax4.set_title('Average Annual Income by Region', fontsize=14)
ax4.legend(bbox_to_anchor=(-1.0, -0.5, 1.8, 0.1), loc=10,prop={'size':12},
ncol=5, mode="expand", borderaxespad=0.)
# ## A Deeper Look into Bad Loans:
# <a id="deeper_bad_loans"></a>
#
# <h4> What we need to know: </h4>
# <ul>
# <li>The number of loans that were classified as bad loans for each region by its <b>loan status</b>. (This will be shown in a dataframe below.)</li>
# <li> This won't give us the exact reasons why a loan is categorized as a bad loan (other variables that might have influence the condition of the loan) but it will give us a <b> deeper insight on the level of risk </b> in a particular region. </li>
# </ul>
#
# <h4> Summary: </h4>
# <ul>
# <li>The regions of the <b> West </b> and <b> SouthEast </b> had a higher percentage in most of the "bad" loan statuses.</li>
# <li> The <b>NorthEast</b> region had a higher percentage in <b>Grace Period</b> and <b>Does not meet Credit Policy</b> loan status. However, both of these are not considered as bad as <b>default</b> for instance. </li>
# <li> Based on this small and brief summary we can conclude that the <b>West</b> and <b>SouthEast</b> regions have the most undesirable loan status, but just by a slightly higher percentage compared to the <b>NorthEast</b> region. </li>
# <li> Again, this does not tell us what causes a loan to be a <b> bad loan </b>, but it gives us some idea about <b>the level of risk</b> within the regions across the United States. </li>
# </ul>
# In[ ]:
# We have 67429 loans categorized as bad loans
badloans_df = df.loc[df["loan_condition"] == "Bad Loan"]
# loan_status cross
loan_status_cross = pd.crosstab(badloans_df['region'], badloans_df['loan_status']).apply(lambda x: x/x.sum() * 100)
number_of_loanstatus = pd.crosstab(badloans_df['region'], badloans_df['loan_status'])
# Round our values
loan_status_cross['Charged Off'] = loan_status_cross['Charged Off'].apply(lambda x: round(x, 2))
loan_status_cross['Default'] = loan_status_cross['Default'].apply(lambda x: round(x, 2))
loan_status_cross['Does not meet the credit policy. Status:Charged Off'] = loan_status_cross['Does not meet the credit policy. Status:Charged Off'].apply(lambda x: round(x, 2))
loan_status_cross['In Grace Period'] = loan_status_cross['In Grace Period'].apply(lambda x: round(x, 2))
loan_status_cross['Late (16-30 days)'] = loan_status_cross['Late (16-30 days)'].apply(lambda x: round(x, 2))
loan_status_cross['Late (31-120 days)'] = loan_status_cross['Late (31-120 days)'].apply(lambda x: round(x, 2))
number_of_loanstatus['Total'] = number_of_loanstatus.sum(axis=1)
# number_of_badloans
number_of_loanstatus
# In[ ]:
charged_off = loan_status_cross['Charged Off'].values.tolist()
default = loan_status_cross['Default'].values.tolist()
not_meet_credit = loan_status_cross['Does not meet the credit policy. Status:Charged Off'].values.tolist()
grace_period = loan_status_cross['In Grace Period'].values.tolist()
short_pay = loan_status_cross['Late (16-30 days)'] .values.tolist()
long_pay = loan_status_cross['Late (31-120 days)'].values.tolist()
charged = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y= charged_off,
name='Charged Off',
marker=dict(
color='rgb(192, 148, 246)'
),
text = '%'
)
defaults = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y=default,
name='Defaults',
marker=dict(
color='rgb(176, 26, 26)'
),
text = '%'
)
credit_policy = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y= not_meet_credit,
name='Does not meet Credit Policy',
marker = dict(
color='rgb(229, 121, 36)'
),
text = '%'
)
grace = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y= grace_period,
name='Grace Period',
marker = dict(
color='rgb(147, 147, 147)'
),
text = '%'
)
short_pays = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y= short_pay,
name='Late Payment (16-30 days)',
marker = dict(
color='rgb(246, 157, 135)'
),
text = '%'
)
long_pays = go.Bar(
x=['MidWest', 'NorthEast', 'SouthEast', 'SouthWest', 'West'],
y= long_pay,
name='Late Payment (31-120 days)',
marker = dict(
color = 'rgb(238, 76, 73)'
),
text = '%'
)
data = [charged, defaults, credit_policy, grace, short_pays, long_pays]
layout = go.Layout(
barmode='stack',
title = '% of Bad Loan Status by Region',
xaxis=dict(title='US Regions')
)
fig = go.Figure(data=data, layout=layout)
iplot(fig, filename='stacked-bar')
# In[ ]:
# Average interest rates clients pay
df['interest_rate'].mean()
# Average annual income of clients
df['annual_income'].mean()
# <h1 align="center"> The Business Perspective </h1>
# <h2 > Understanding the Operative Side of Business </h2>
# <a id="operative_side"></a>
# <img src="http://bestcredit.sg/wp-content/uploads/2017/07/licensed-money-lender.jpg"><br><br>
# Now we will have a closer look at the <b> operative side </b> of business by state. This will give us a clearer idea of which states have a higher level of operating activity. This will allow us to ask further questions such as: Why do we have a higher level of operating activity in this state? Could it be because of economic factors, or because the risk level is low and returns are fairly decent? Let's explore!
#
# <h4> What we need to know: </h4>
# <ul>
# <li> We will focus on <b>three key metrics</b>: Loans issued by state (Total Sum), Average interest rates charged to customers and average annual income of all customers by state. </li>
# <li> The purpose of this analysis is to see states that give high returns at a decent risk. </li>
#
# </ul>
#
# <h4> Summary: </h4>
# <ul>
# <li> <b>California, Texas, New York and Florida</b> are the states in which the highest amount of loans were issued. </li>
# <li> Interestingly enough, all four states have an approximate <b>interest rate of 13%</b>, which is at the same level as the average interest rate for all states (13.24%) </li>
# <li> California, Texas and New York are <b>all above the average annual income</b> (with the exclusion of Florida); this might give a possible indication of why most loans are issued in these states. </li>
# </ul>
# In[ ]:
# Plotting by states
# Grouping by our metrics
# First Plotly Graph (We evaluate the operative side of the business)
by_loan_amount = df.groupby(['region','addr_state'], as_index=False).loan_amount.sum()
by_interest_rate = df.groupby(['region', 'addr_state'], as_index=False).interest_rate.mean()
by_income = df.groupby(['region', 'addr_state'], as_index=False).annual_income.mean()
# Take the values to a list for visualization purposes.
states = by_loan_amount['addr_state'].values.tolist()
average_loan_amounts = by_loan_amount['loan_amount'].values.tolist()
average_interest_rates = by_interest_rate['interest_rate'].values.tolist()
average_annual_income = by_income['annual_income'].values.tolist()
from collections import OrderedDict
# Figure Number 1 (Perspective for the Business Operations)
metrics_data = OrderedDict([('state_codes', states),
('issued_loans', average_loan_amounts),
('interest_rate', average_interest_rates),
('annual_income', average_annual_income)])
metrics_df = pd.DataFrame.from_dict(metrics_data)
metrics_df = metrics_df.round(decimals=2)
metrics_df.head()
# Think of a way to add default rate
# Consider adding a few more metrics for the future
# In[ ]:
# Now it comes the part where we plot out plotly United States map
import plotly.plotly as py
import plotly.graph_objs as go
for col in metrics_df.columns:
metrics_df[col] = metrics_df[col].astype(str)
scl = [[0.0, 'rgb(210, 241, 198)'],[0.2, 'rgb(188, 236, 169)'],[0.4, 'rgb(171, 235, 145)'], [0.6, 'rgb(140, 227, 105)'],[0.8, 'rgb(105, 201, 67)'],[1.0, 'rgb(59, 159, 19)']]
metrics_df['text'] = metrics_df['state_codes'] + '<br>' +'Average loan interest rate: ' + metrics_df['interest_rate'] + '<br>'+'Average annual income: ' + metrics_df['annual_income']
data = [ dict(
type='choropleth',
colorscale = scl,
autocolorscale = False,
locations = metrics_df['state_codes'],
z = metrics_df['issued_loans'],
locationmode = 'USA-states',
text = metrics_df['text'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = "$s USD")
) ]
layout = dict(
title = 'Lending Clubs Issued Loans <br> (A Perspective for the Business Operations)',
geo = dict(
scope = 'usa',
projection=dict(type='albers usa'),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)')
)
fig = dict(data=data, layout=layout)
iplot(fig, filename='d3-cloropleth-map')
# ## Analysis by Income Category:
# <a id="income_category"></a>
# In this section we will create different <b> income categories </b> in order to detect important patters and go more into depth in our analysis.
#
# **What we need to know:** <br>
# <ul>
# <li><b>Low income category:</b> Borrowers that have an annual income lower than or equal to 100,000 usd.</li>
# <li> <b> Medium income category:</b> Borrowers that have an annual income higher than 100,000 usd but lower than or equal to 200,000 usd. </li>
# <li><b> High income category: </b> Borrowers that have an annual income higher than 200,000 usd. </li>
# </ul>
#
# **Summary:**
# <ul>
# <li>Borrowers that were part of the <b>high income category</b> took higher loan amounts than people from the <b>low</b> and <b>medium income categories.</b> Of course, people with higher annual incomes are more likely to pay loans of a higher amount. (First row to the left of the subplots) </li>
# <li> Loans that were borrowed by the <b>Low income category</b> had a slightly higher chance of becoming a bad loan. (First row to the right of the subplots) </li>
# <li>Borrowers with <b>High</b> and <b> Medium</b> annual incomes had a longer employment length than people with lower incomes. (Second row to the left of the subplots) </li>
# <li> Borrowers with a lower income had on average <b>higher interest rates</b> while people with a higher annual income had <b>lower interest rates</b> on their loans. (Second row to the right of the subplots)</li>
#
# </ul>
# In[ ]:
# Let's create categories for annual_income since most of the bad loans are located below 100k
df['income_category'] = np.nan
lst = [df]
for col in lst:
col.loc[col['annual_income'] <= 100000, 'income_category'] = 'Low'
col.loc[(col['annual_income'] > 100000) & (col['annual_income'] <= 200000), 'income_category'] = 'Medium'
col.loc[col['annual_income'] > 200000, 'income_category'] = 'High'
# In[ ]:
# Let's transform the column loan_condition into integrers.
lst = [df]
df['loan_condition_int'] = np.nan
for col in lst:
col.loc[df['loan_condition'] == 'Bad Loan', 'loan_condition_int'] = 0 # Negative (Bad Loan)
col.loc[df['loan_condition'] == 'Good Loan', 'loan_condition_int'] = 1 # Positive (Good Loan)
# In[ ]:
fig, ((ax1, ax2), (ax3, ax4))= plt.subplots(nrows=2, ncols=2, figsize=(14,6))
# Change the Palette types tomorrow!
sns.violinplot(x="income_category", y="loan_amount", data=df, palette="Set2", ax=ax1 )
sns.violinplot(x="income_category", y="loan_condition_int", data=df, palette="Set2", ax=ax2)
sns.boxplot(x="income_category", y="emp_length_int", data=df, palette="Set2", ax=ax3)
sns.boxplot(x="income_category", y="interest_rate", data=df, palette="Set2", ax=ax4)
# <h1 align="center"> Assesing Risks </h1>
# <h2> Understanding the Risky side of Business </h2>
# <a id="risky_side"></a>
#
# Although the <b> operative side of business </b> is important, we also have to analyze the level of risk in each state. Credit scores are important metrics for analyzing the level of risk of an individual customer. However, there are also other important metrics that help estimate the level of risk of each state. <br><br>
#
# <h4> What we need to know: </h4>
# <ul>
# <li> <b>Debt-to-income</b> is an important metric since it indicates approximately the level of debt of each individual consumer with respect to their total income. </li>
# <li> The <b>average length of employment</b> tells us a better story about the labor market in each state, which is helpful to assess the level of risk. </li>
# </ul>
#
# <h4> Summary: </h4>
# <ul>
# <li> <b>IOWA</b> has the highest default ratio; nevertheless, the amount of loans issued in that state is <b>too low</b>. (Number of Bad loans is equal to 3) </li>
# <li> California and Texas seem to have the lowest risk and the highest possible return for investors. However, I will look more deeply into these states and create other metrics to analyze the level of risk for each state. </li>
#
# </ul>
#
#
# **Note: I will be updating these section sooner or later (Stay in touch!)**
# In[ ]:
by_condition = df.groupby('addr_state')['loan_condition'].value_counts()/ df.groupby('addr_state')['loan_condition'].count()
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by="addr_state")
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'] )
cross_condition = pd.crosstab(df["addr_state"], df["loan_condition"])
# Percentage of condition of loan
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply(lambda x: x/x.sum() * 100)
condition_ratio = cross_condition["Bad Loan"]/cross_condition["Good Loan"]
by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
state_codes = sorted(states)
# Take to a list
default_ratio = condition_ratio.values.tolist()
average_dti = by_dti['dti'].values.tolist()
average_emp_length = by_emp_length["emp_length_int"].values.tolist()
number_of_badloans = loan_condition_bystate['Bad Loan'].values.tolist()
percentage_ofall_badloans = percentage_loan_contributor['Bad Loan'].values.tolist()
# Figure Number 2
risk_data = OrderedDict([('state_codes', state_codes),
('default_ratio', default_ratio),
('badloans_amount', number_of_badloans),
('percentage_of_badloans', percentage_ofall_badloans),
('average_dti', average_dti),
('average_emp_length', average_emp_length)])
# Figure 2 Dataframe
risk_df = | pd.DataFrame.from_dict(risk_data) | pandas.DataFrame.from_dict |
import os, glob
import ccxt, pickle, time
import datetime
import pandas as pd
class History():
def __init__(self, dir_path="./history"):
self.dir_path = dir_path
self.context = {}
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for filepath in glob.iglob(f"{self.dir_path}/*.pkl"):
df = pd.read_pickle(filepath)
filepath = filepath.replace(f"{self.dir_path}/",'')
filepath = filepath.replace('.pkl','')
if 'ohlcv' in filepath:
exchange,symbol,ohlc,ohlc_t = filepath.split('_')
exchange = exchange.lower()
symbol = symbol.replace('-','/')
self.init_symbol_context(exchange,symbol)
self.context[exchange][symbol]['ohlc'][ohlc_t] = df
def init_exchange_context(self, exchange):
exchange = exchange.lower()
if exchange not in self.context.keys():
self.context[exchange] = { }
def init_symbol_context(self, exchange, symbol):
exchange = exchange.lower()
self.init_exchange_context(exchange)
if symbol not in self.context[exchange]:
self.context[exchange][symbol] = { 'ohlc': {} }
def get_symbol(self,exchange,symbol):
if exchange in self.context:
if symbol in self.context[exchange]:
return self.context[exchange][symbol]
else:
print('no symbol in ', exchange)
return None
else:
print('no', exchange, 'in context')
def exchanges(self):
return list(self.context.keys())
def exchange_symbols(self, exchange):
if exchange in self.context:
return list(self.context[exchange].keys())
else:
print('no', exchange, 'in context', self.context.keys())
return []
def download_ohlc_pandas(self, exchange, symbol, ohlcv, since=None):
df = pd.DataFrame(columns=['timestamp','o','h','l','c','v'])
try:
if not since:
ohlcv_downloaded = exchange.fetch_ohlcv(symbol, ohlcv)
else:
ohlcv_downloaded = exchange.fetch_ohlcv(symbol=symbol, timeframe=ohlcv, since=since)
except Exception as e:
print(e)
else:
df = pd.DataFrame(ohlcv_downloaded, columns=['timestamp','o','h','l','c','v'])
df['timestamp'] = df['timestamp'].apply(lambda x: datetime.datetime.fromtimestamp(x/1000))
df = df.set_index('timestamp')
return df
def append_ohlc(self, exchange, symbol, ohlc):
print(ohlc)
i = ohlc['i']
if exchange not in ccxt.exchanges:
print(exchange, 'not in driver')
return
file_name = f"{self.dir_path}/{exchange}_{symbol}_ohlcv_{i}.pkl"
def update_ohlc(self, exchange, ohlcv, symbol_filter=[]):
if exchange not in ccxt.exchanges:
print(exchange, 'not in driver')
return
ccxt_driver = eval(f"ccxt.{exchange}")
exchange = ccxt_driver()
self.init_exchange_context(exchange.id)
if not exchange.has['fetchOHLCV']:
print(exchange.id, 'has no method fetchOHLCV()')
return
markets = exchange.load_markets()
symbols = [ s[0] for s in markets.items()]
for s in symbols:
if any( substr in s for substr in symbol_filter) and symbol_filter:
file_name = f"{self.dir_path}/{exchange}_{s.replace('/','-')}_ohlcv_{ohlcv}.pkl"
#-----------------------------
# download full ohlc history
#-----------------------------
if not os.path.isfile(f"{file_name}"):
print('downloading', exchange, s, ohlcv)
df = self.download_ohlc_pandas(exchange,s,ohlcv)
df.to_pickle(file_name)
time.sleep(exchange.rateLimit / 100)
else:
df = | pd.read_pickle(file_name) | pandas.read_pickle |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = | Index(rng, dtype=object) | pandas.Index |
import glob
import matplotlib
matplotlib.use("Agg")
import bokeh.plotting as plt
from bokeh.embed import file_html
from bokeh.resources import CDN
import cherrypy
import pandas as pd
import numpy as np
class Main(object):
@cherrypy.expose
def index(self):
df = pd.concat([ | pd.read_csv(fname) | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import gc
#%% Read in data
user_file_list = os.listdir('data/Archived users/')
user_set_v1 = set(map(lambda x: x[5: 15], user_file_list)) # [5: 15] to return just the user IDs
tappy_file_list = os.listdir('data/Tappy Data/')
user_set_v2 = set(map(lambda x: x[: 10], tappy_file_list)) # [: 10] to return just the user IDs
user_set = user_set_v1.intersection(user_set_v2)
print(len(user_set))
#%% Format into a Pandas dataframe
def read_user_file(file_name):
f = open('data/Archived users/' + file_name)
data = [line.split(': ')[1][: -1] for line in f.readlines()]
f.close()
return data
files = os.listdir('data/Archived users/')
columns = [
'BirthYear', 'Gender', 'Parkinsons', 'Tremors', 'DiagnosisYear',
'Sided', 'UPDRS', 'Impact', 'Levadopa', 'DA', 'MAOB', 'Other'
]
user_df = pd.DataFrame(columns=columns) # empty Data Frame for now
for user_id in user_set:
    temp_file_name = 'User_' + user_id + '.txt' # archived user file names have the format of `User_[UserID].txt`
if temp_file_name in files: # check to see if the user ID is in our valid user set
temp_data = read_user_file(temp_file_name)
user_df.loc[user_id] = temp_data # adding data to our DataFrame
print(user_df.head())
#%% Change numeric data into appropriate format
# force some columns to have numeric data type
user_df['BirthYear'] = pd.to_numeric(user_df['BirthYear'], errors='coerce')
user_df['DiagnosisYear'] = | pd.to_numeric(user_df['DiagnosisYear'], errors='coerce') | pandas.to_numeric |
import numpy as np
import pandas as pd
from postproc_utils import rmse_masked, nse
def test_rmse_masked():
y_true = pd.Series([1, 5, 3, 4, 2])
y_pred = y_true.copy()
err = rmse_masked(y_true, y_pred)
assert err == 0
y_pred = pd.Series([0, 0, 0, 0, 0])
err = rmse_masked(y_true, y_pred)
assert round(err, 2) == 3.32
y_true = pd.Series([1, np.nan, 3, 4, 2])
err = rmse_masked(y_true, y_pred)
assert round(err, 2) == 2.74
def test_nse():
y_true = pd.Series([1, 5, 3, 4, 2])
y_pred = y_true.copy()
nse_samp = nse(y_true, y_pred)
assert nse_samp == 1
y_pred = pd.Series([1, 4, 0, 4, 2])
nse_samp = nse(y_true, y_pred)
assert nse_samp == 0
y_pred = pd.Series([2, 4, 0, 4, 2])
nse_samp = nse(y_true, y_pred)
assert round(nse_samp, 1) == -0.1
y_pred = pd.Series([1, 4, 2, 4, 2])
nse_samp = nse(y_true, y_pred)
assert round(nse_samp, 1) == 0.8
y_true = pd.Series([1, np.nan, 3, 4, 2])
y_pred = pd.Series([1, 4, 2, 4, 2])
nse_samp = nse(y_true, y_pred)
assert round(nse_samp, 1) == 0.8
y_true = pd.Series([1, np.nan, 3, 4, np.nan])
y_pred = pd.Series([1, 4, 2, 4, 2])
nse_samp = nse(y_true, y_pred)
assert round(nse_samp, 2) == 0.79
y_true = pd.Series([1, np.nan, 2, 4, np.nan])
y_pred = | pd.Series([1, 4, 2, 4, 2]) | pandas.Series |
# -*- encoding: utf-8 -*-
"""
******************************************************************************
* MODULE: datasets
******************************************************************************
"""
# IMPORTS ********************************************************************
import json
from typing import Iterable
from tqdm.gui import tqdm_gui
from app.home.vups import const, data, utils
import os, sys, validators, requests, time, pandas as pd, inspect
from clint.textui import progress
from pathlib import Path
import pandas as pd
import tqdm
import typer
# ****************************************************************************
# @utils.print_caller_name(4)
def get_data(
filepath_or_buffer=None,
usecols=None,
nrows=None,
sep=None,
encoding=None,
warn_bad_lines=None,
error_bad_lines=None,
dtype=None,
mapa=None,
memory_map=True,
name=None,
format=None
):
"""FUNÇÃO CENTRAL PARA OBTENÇÃO DE DADOS
----------------------------------------------------------------------
- BUSCA O DATASET POR ORDEM DE PRECEDÊNCIA DO TIPO DE ARMAZENAMENTO.
PRECEDÊNCIA: PARQUET < CSV < URL
- TENTA IMPORTAR O DATASET SOLICITADO DE PARQUET FILE PARA DATAFRAME DO PANDAS, CASO O PARQUET EXISTA.
- SENÃO, TENTA IMPORTAR DE CSV PARA DATAFRAME DO PANDAS, TRATA OS DADOS E FAZ O CAST PARA PARQUET, UTILIZANDO POR FIM O PARQUET
- CASO NÃO ENCONTRE O DATASET EM CSV, RECORRE AS URLS NO REGISTRO DO DATASET
AS URLS (CSV) SÃO IMPORTADAS PARA DATAFRAME DO PANDAS, OS DADOS SÃO TRATADOS,
CONVERTIDO PARA PARQUET, O ARQUIVO É GRAVADO EM DISCO, POR FIM O PARQUET É UTILIZADO.
"""
filename, file_extension = os.path.splitext(os.path.basename(filepath_or_buffer[0][1]))
chunk_size = 1024
if validators.url(filepath_or_buffer[0][1]):
print("Baixando {} de URL ...".format(filename))
filepath = []
for url in filepath_or_buffer:
filename = url[0]
if validators.url(url[1]):
r = requests.get(url[1], stream=True)
                # CREATE A PATH TO STORE THE DOWNLOADED FILE AND ADD THAT PATH TO A LIST
DATENAME = time.strftime("%Y%m%d-%H%M%S")
filepath.append(
os.path.join(
const.DATATMP,
"{}-{}{}".format(DATENAME, filename.upper(), format)
)
)
                # WRITE EACH DOWNLOADED DATASET, SO THAT LATER ...
                # ... THEY CAN BE CONCATENATED, THE DATA TYPES TRANSFORMED AND THE UNIFIED DATASET CONVERTED TO PARQUET
with open(filepath[-1], 'wb') as fd:
print("Obtendo o comprimento dos dados ...")
total_length = int(r.headers.get('content-length'))
print("Gravando os dados em disco ...")
for chunk in progress.bar(r.iter_content(chunk_size=chunk_size), expected_size=(total_length/chunk_size) + 1):
if chunk:
fd.write(chunk)
fd.flush()
        # THE DOWNLOADED DATASETS ARE IMPORTED AGAIN FROM THE FILES ON DISK
ds_lst = []
for f in filepath:
_PARAMS = {
"filepath_or_buffer": f,
"iterator": True,
"chunksize": chunk_size,
"usecols": usecols,
"sep": sep,
"nrows": nrows,
"encoding": encoding,
"warn_bad_lines": warn_bad_lines,
"error_bad_lines": error_bad_lines,
# "parse_dates": True,
# "dtype": dtype,
"memory_map": memory_map
}
iter = pd.read_csv(**_PARAMS)
subset = pd.concat(iter, ignore_index=True)
            # (I CALL EACH SET THAT MAKES UP THE SAME DATASET A "SUBSET")
            # THE "SUBSETS" ARE ADDED TO A LIST SO THEY CAN BE CONCATENATED
ds_lst.append(subset)
print("Concatenando arquivos, caso sejam múltiplos datasets...")
dataset = pd.concat(ds_lst, ignore_index=True)
        # NOW IT IS TIME TO CLEAN AND TRANSFORM THE DATA, SO THAT A TYPED PARQUET FILE CAN FINALLY BE WRITTEN
        # --------------------------------------------------------------------------------------------
        # I USE THE DTYPE MAP THAT WAS DECLARED IN THE DATASET REGISTRY
if mapa is not None:
print("Realizando transformações de dados no dataset consolidado...")
dataset = data.dtype_transform(dataset, mapa)
else:
            # IF THE DTYPE MAP DOES NOT EXIST, AN INITIAL ONE IS CREATED TO BE CONFIGURED LATER, AND THE CLEANING REPROCESSED
            print("Initializing the dtype map...")
data.create_dtype_map(dataset, name)
        # AFTER THE DATA HAS BEEN CLEANED AND TRANSFORMED, A PARQUET FILE IS CREATED AND BECOMES THE OFFICIAL DATASET
        print("Converting the dataset with already-cleaned dtypes to PARQUET...")
filepath_or_buffer = data.convert_to_parquet([dataset], name)
file_extension = ".parquet"
        # CLEAN UP THE TEMPORARY DATA DIRECTORY            # ALTERNATIVE
        # for f in os.listdir(const.DATATMP):              # for f in filepath:
        #     file = os.path.join(const.DATATMP, f)        # REMOVES ONLY THE FILES USED IN THIS IMPORT
        #     os.remove(file)                              # os.remove(f)
if file_extension == ".csv":
# print("Importando {} de CSV...".format(filename))
_PARAMS = {
"filepath_or_buffer": filepath_or_buffer[0][1],
"iterator": True,
"chunksize": chunk_size,
"usecols": usecols,
"sep": sep,
"nrows": nrows,
"encoding": encoding,
"warn_bad_lines": warn_bad_lines,
"error_bad_lines": error_bad_lines,
# "parse_dates": True,
# "dtype": dtype,
"memory_map": memory_map,
"low_memory": False
}
dataset = progress_read_csv(**_PARAMS)
# iter = progress_read_csv(**_PARAMS)
# dataset = pd.concat(iter, ignore_index=True)
if mapa is not None:
# print("Realizando transformações de dados no dataset CSV unificado...")
# print("MAPA:", mapa)
dataset = data.dtype_transform(dataset, mapa)
else:
# print("Inicializando as variáveis do mapeamento de tipos de dados...")
# print("MAPA:", mapa)
data.create_dtype_map(dataset, name)
filepath_or_buffer = data.convert_to_parquet([dataset], name)
file_extension = ".parquet"
if file_extension == ".parquet":
# print("Importando {} de PARQUET...".format(filename))
_PARAMS = {
"path": filepath_or_buffer[0][1],
"columns": usecols,
}
dataset = pd.read_parquet(**_PARAMS)
# PATH = const.METADIR
# FILENAME = "DTYPEMAP_{}.json".format(name)
# FILEPATH = os.path.join(PATH, FILENAME)
# if not os.path.exists(FILEPATH):
# print("NÃO EXISTE MAPA!")
# data.create_dtype_map(dataset, name)
# elif mapa != const.read_dtype_map(dataset):
# print("MAPAS DIFERENTES! REPROCESSANDO O TRATAMENTO DE DADOS ...")
# dataset = data.dtype_transform(dataset, mapa)
# print("ATUALIZANDO O ARQUIVO PARQUET ...")
# data.convert_to_parquet([dataset], name)
# if filename.strip() != "":
# curframe = inspect.currentframe()
# calframe = inspect.getouterframes(curframe, 2)
# print("===================================================================")
# print("FILENAME:", filename)
# print("# FILE: {}".format(__file__))
# print("# NAME: {}".format(__name__))
# print("# OBJECT: {}".format(sys._getframe( ).f_code.co_name))
# print("# CALLER:", calframe[1][3])
# print("-------------------------------------------------------------------")
# print(dataset.info())
# print("===================================================================")
return dataset
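# ****************************************************************************
# Hedged usage sketch (not part of the original module): it illustrates how
# get_data() is typically driven by a dataset registry entry from [const],
# exactly as the accessor methods of the Datasets class below do. The dataset
# name "MICRODADOS" is only an example; nothing here runs at import time.
def _example_registry_call(columns=None, nrows=None):
    name = "MICRODADOS"
    # which_file_exists() resolves the best available source (PARQUET < CSV < URL)
    filepath_or_buffer = data.which_file_exists(name)
    cfg = getattr(const, name)
    return get_data(
        filepath_or_buffer=filepath_or_buffer,
        usecols=columns,
        nrows=nrows,
        sep=cfg.get("DELIMITER"),
        encoding=cfg.get("ENCODING"),
        warn_bad_lines=False,
        error_bad_lines=False,
        mapa=cfg.get("MAP"),
        name=name,
        format=cfg.get("FORMAT"),
    )
# ****************************************************************************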
class Datasets:
"""CLASSE QUE INTERMEDIA A PASSAGEM DE PARÂMETROS DOS DATASETS REGISTRADOS
OS METADADOS E CONFIGURAÇÕES DE TODOS OS DATASETS SÃO ARMAZENADOS EM ARQUIVOS JSON
OS ARQUIVOS JSON SÃO LIDOS PELO MÓDULO [vups.const]
OS MÉTODOS DESTA CLASSE BUSCAM NO MÓDULO [vups.const] OS PARÂMETROS DO DATASET A SER IMPORTADO
ENTÃO, REPASSA ESSES PARÂMETROS PARA O MÉTODO get_data()
"""
# def __init__(self, name=None, usecols=None, nrows=None):
# self.name = name
# self.usecols = usecols
# self.nrows = nrows
# def to_json(self):
# pass
def microdados(columns=None, nrows=None, dtype=None):
"""MÉTODO QUE BUSCA OS PARÂMETROS DO DATASET [microdados]"""
name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
    def microdados_bairros(columns=None, nrows=None, dtype=None):
        """METHOD THAT LOOKS UP THE PARAMETERS OF THE [microdados_bairros] DATASET"""
        name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
    def arrecadacao(columns=None, nrows=None, dtype=None):
        """METHOD THAT LOOKS UP THE PARAMETERS OF THE [arrecadacao] DATASET"""
        name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
    def tipo_arrecadacao(columns=None, nrows=None, dtype=None):
        """METHOD THAT LOOKS UP THE PARAMETERS OF THE [tipo_arrecadacao] DATASET"""
        name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
    def transferencias(columns=None, nrows=None, dtype=None):
        """METHOD THAT LOOKS UP THE PARAMETERS OF THE [transferencias] DATASET"""
        name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
    def populacao(columns=None, nrows=None, dtype=None):
        """METHOD THAT LOOKS UP THE PARAMETERS OF THE [populacao] DATASET"""
        name = sys._getframe( ).f_code.co_name.upper()
filepath_or_buffer = data.which_file_exists(name)
mapa = getattr(const, name).get("MAP")
format = getattr(const, name).get("FORMAT")
return get_data(
filepath_or_buffer=filepath_or_buffer,
usecols=columns,
nrows=nrows,
sep=getattr(const, name).get("DELIMITER"),
encoding=getattr(const, name).get("ENCODING"),
warn_bad_lines=False,
error_bad_lines=False,
dtype=dtype,
mapa=mapa,
name=name,
format=format
)
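# ****************************************************************************
# Hedged usage sketch (not part of the original module): the accessors above are
# meant to be called directly on the class. The helper below only shows the
# intended call pattern and is never invoked at import time; the row limit is an
# arbitrary illustrative value.
def _example_datasets_usage():
    # nrows limits the rows read when the source is a CSV; a PARQUET source loads in full
    amostra = Datasets.microdados(columns=None, nrows=1000)
    amostra.info()
    return amostra
# ****************************************************************************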
def progress_read_csv(**_PARAMS):
    # read number of rows quickly (sum1forline is expected to be defined elsewhere in this module)
    filename = _PARAMS.get('filepath_or_buffer')
    length = sum1forline(filename)
    chunksize = _PARAMS.get('chunksize')
    # initiate a blank dataframe
    df = pd.DataFrame()
    # NOTE: the original body was truncated here; the chunked read with a progress
    # bar below is a reconstruction (assumption), not the original implementation.
    reader = pd.read_csv(**_PARAMS)  # iterator=True/chunksize make this an iterable of chunks
    for chunk in tqdm.tqdm(reader, total=(length // chunksize) + 1, desc=os.path.basename(filename)):
        df = pd.concat([df, chunk], ignore_index=True)
    return df
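# ****************************************************************************
# Hedged sketch (not part of the original module): sum1forline() is referenced
# above but defined elsewhere in the codebase. A typical implementation simply
# counts the lines of the file without loading it all into memory, e.g.:
def _sum1forline_sketch(filename):
    # Count lines one at a time in binary mode
    with open(filename, "rb") as f:
        return sum(1 for _ in f)
# ****************************************************************************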